content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def signup() -> Response | str | tuple[dict[str, str | int], int]:
    """Handle user signup (GET shows the form, POST creates the account).

    Returns:
        - a redirect to home when already authenticated or after a
          successful form signup,
        - the rendered signup template on GET or invalid/duplicate input,
        - a ``(json_dict, 200)`` tuple when the form has a truthy "raw" field.
    """
    # Bypass if user is logged in.
    if current_user.is_authenticated:
        return redirect(url_for("home"))
    # GET request: just show the signup form.
    # (Originally implemented with `assert`/AssertionError control flow,
    # which silently disappears under `python -O`; explicit branches are
    # equivalent and safe.)
    if request.method == "GET":
        return render_template("signup.html")
    # Process form.
    username, password, remember = _get_auth_form()
    if not (username and password):
        return render_template("signup.html")
    # Check if user with given username already exists.
    if database.get_instance(models.User, username=username):
        flash(
            f"User with {username} username already exists.<br>"
            f'Go to <a href="{url_for("auth.login")}">login page</a>.'
        )
        return render_template("signup.html")
    # Add user to database and login.
    user = database.add_instance(
        models.User,
        lambda i: i.set_password(password),
        username=username,
    )
    login_user(user, remember=remember)
    # Return json response or redirect to home.
    if request.form.get("raw"):
        response = {
            "info": f"Successfully signed up as {username}.",
            "status": 200,
        }
        return response, 200
    return redirect(url_for("home"))
|
9496c11a9015b69c7cf2f89d19a45f93405f5dfe
| 3,640,981
|
import copy
def evaluate_all_configs(hparams, agent_model_dir):
    """Evaluate the agent with multiple eval configurations."""
    def build_eval_hparams(base, action_mode, noops):
        # Shallow-copy so the caller's hparams object stays untouched.
        result = copy.copy(base)
        result.add_hparam("num_agents", base.eval_num_agents)
        selector = {
            "sample": lambda policy: policy.sample(),
            "mode": lambda policy: policy.mode(),
        }
        result.add_hparam("policy_to_actions_lambda", selector[action_mode])
        result.max_num_noops = noops
        return result

    metrics = {}
    # Evaluate every combination of action selection (mode vs. sample)
    # and whether initial no-ops are performed.
    for action_mode in ("mode", "sample"):
        for noops in (hparams.eval_max_num_noops, 0):
            eval_hparams = build_eval_hparams(hparams, action_mode, noops)
            scores = evaluate_single_config(eval_hparams, agent_model_dir)
            for score, clipped in zip(scores, (True, False)):
                key = "mean_reward/eval/{}_{}_max_noops_{}".format(
                    action_mode, noops,
                    "clipped" if clipped else "unclipped"
                )
                metrics[key] = score
    return metrics
|
107fb692adec1ce7dbb580c30e3e7b0402874054
| 3,640,982
|
from typing import Tuple
def shape(a: Matrix) -> Tuple[int, int]:
    """Return the (num_rows, num_cols) of matrix ``a``.

    An empty matrix has zero columns; otherwise the column count is the
    length of the first row.
    """
    if not a:
        return len(a), 0
    return len(a), len(a[0])
|
24ef9045e86b6027f76ddf57bf5eba44553798c5
| 3,640,983
|
def solve(A, b):
    """
    :param A: Matrix R x C
    :param b: Vector R
    :return: Vector C 'x' solving Ax=b

    NOTE(review): the echelon_solve call below is commented out, so this
    currently returns the intermediate triple (row_list, col_label_list,
    M*b) rather than the solution vector -- confirm this is intentional
    (e.g. for debugging or grading).

    >>> M = Mat(({'a', 'b', 'c', 'd'}, {'A', 'B', 'C', 'D'}), { \
    ('a', 'A'): one, ('a', 'B'): one, ('a', 'D'): one, \
    ('b', 'A'): one, ('b', 'D'): one, \
    ('c', 'A'): one, ('c', 'B'): one, ('c', 'C'): one, ('c', 'D'): one, \
    ('d', 'C'): one, ('d', 'D'): one \
    })
    >>> v = Vec(M.D[0], {'a': one, 'c': one})
    >>> solve(M, v)
    """
    # M transforms A; presumably U = M*A is in echelon form -- confirm
    # against the definition of `transformation`.
    M = transformation(A)
    U = M*A
    col_label_list = sorted(A.D[1])
    # Rows of U, ordered by their sorted row labels.
    U_rows_dict = mat2rowdict(U)
    row_list = [U_rows_dict[i] for i in sorted(U_rows_dict)]
    # return echelon_solve(row_list,col_label_list, M*b)
    # print(row_list, col_label_list, repr(M * b))
    return row_list, col_label_list, M * b
|
b25a762ee8f8229d1bc573c828a7151770d3240c
| 3,640,984
|
def token_groups(self):
    """Return the groups that the Token's creator is a member of."""
    owner = self.created_by
    return owner.groups
|
9db411660db1def09b8dc52db800ca4c09a38cce
| 3,640,985
|
import requests
def get_html_content_in_text(url):
    """Fetch the webpage at ``url`` and return its body as text.

    Arguments:
        url -- a webpage url string.
    Returns:
        The decoded text content of the HTTP response.
    """
    response = requests.get(url)
    return response.text
|
fd8ddc992f34c186051ca8985ffb110c50004970
| 3,640,986
|
def subscribe():
    """Register a webhook observer that is notified of new messages."""
    form = request.form
    observer = NewMessageObserver(
        form.get("webhook_url"),
        form.get("header_key"),
        form.get("header_value"),
    )
    g.driver.subscribe_new_messages(observer)
    return jsonify({"success": True})
|
97a47fb298bbc0bac3e333037210556525dd837f
| 3,640,987
|
def SegAlign(ea, alignment):
    """
    Change alignment of the segment
    @param ea: any address in the segment
    @param alignment: new alignment of the segment (one of the sa... constants)
    @return: success (boolean)
    """
    # Thin wrapper: delegates to the generic segment-attribute setter.
    return SetSegmentAttr(ea, SEGATTR_ALIGN, alignment)
|
c0c380e194fbed43b87be81108eecf864809c447
| 3,640,988
|
import requests
def create_bitlink(logger, headers='', long_url='google.com'):
    """
    Create a short link from a long URL via the Bitly v4 API.

    :param logger: logger object
    :param headers: request headers carrying the Generic Access Token
        generated on the Bitly site
    :param long_url: the URL to shorten
    :return: the created short link (bitlink id)
    """
    url_template = 'https://api-ssl.bitly.com/v4/{}'
    user, bit = ['user', 'bitlinks']
    with requests.Session() as s:
        # Look up the user's default group; its guid is required by the
        # bitlink-creation endpoint.
        bitl_user_info = s.get(url_template.format(user), headers=headers)
        logger.info(f'Получаем группу по пользователю ответ: {bitl_user_info.json()}')
        group_guid = bitl_user_info.json()['default_group_guid']
        payload = {'group_guid': group_guid, 'title': 'shortlink', 'long_url': long_url}
        # Create the bitlink; the response's 'id' field is the short link.
        response = s.post(url_template.format(bit), json=payload, headers=headers)
        bitlink = response.json()['id']
    return bitlink
|
be5c7882a8577c8d406412c790f3d5fbcbd11019
| 3,640,989
|
import numpy
def compute_neq(count_mat):
    """
    Compute the Neq for each residue from an occurence matrix.
    Parameters
    ----------
    count_mat : numpy array
        an occurence matrix returned by `count_matrix`.
    Returns
    -------
    neq_array : numpy array
        a 1D array containing the neq values
    """
    # Normalize counts to frequencies, then compute the Neq row by row
    # (axis 1: one row per residue).
    freq_matrix = utils.compute_freq_matrix(count_mat)
    return numpy.apply_along_axis(_neq_per_residue, 1, freq_matrix)
|
e3d738eb1c8ed58a3c4d4a4efc5323930d03be1f
| 3,640,990
|
import optparse
def _GetOptionsParser():
    """Get the options parser.

    Builds the optparse parser for this script: inputs/namespaces seed the
    dependency calculation, roots define the search paths, and the output
    options control what is produced (list, concatenated script, or
    Closure-compiled output).
    """
    parser = optparse.OptionParser(__doc__)
    # Seed files: namespaces provided by these files are added to the set
    # to find dependencies for.
    parser.add_option('-i',
                      '--input',
                      dest='inputs',
                      action='append',
                      default=[],
                      help='One or more input files to calculate dependencies '
                      'for. The namespaces in this file will be combined with '
                      'those given with the -n flag to form the set of '
                      'namespaces to find dependencies for.')
    # Seed namespaces given directly on the command line.
    parser.add_option('-n',
                      '--namespace',
                      dest='namespaces',
                      action='append',
                      default=[],
                      help='One or more namespaces to calculate dependencies '
                      'for. These namespaces will be combined with those given '
                      'with the -i flag to form the set of namespaces to find '
                      'dependencies for. A Closure namespace is a '
                      'dot-delimited path expression declared with a call to '
                      'goog.provide() (e.g. "goog.array" or "foo.bar").')
    # Directories scanned for goog.provide'd sources.
    parser.add_option('--root',
                      dest='roots',
                      action='append',
                      default=[],
                      help='The paths that should be traversed to build the '
                      'dependencies.')
    # Output selection and Closure-compiler configuration.
    parser.add_option('-o',
                      '--output_mode',
                      dest='output_mode',
                      type='choice',
                      action='store',
                      choices=['list', 'script', 'compiled'],
                      default='list',
                      help='The type of output to generate from this script. '
                      'Options are "list" for a list of filenames, "script" '
                      'for a single script containing the contents of all the '
                      'files, or "compiled" to produce compiled output with '
                      'the Closure Compiler. Default is "list".')
    parser.add_option('-c',
                      '--compiler_jar',
                      dest='compiler_jar',
                      action='store',
                      help='The location of the Closure compiler .jar file.')
    parser.add_option('-f',
                      '--compiler_flags',
                      dest='compiler_flags',
                      default=[],
                      action='append',
                      help='Additional flags to pass to the Closure compiler. '
                      'To pass multiple flags, --compiler_flags has to be '
                      'specified multiple times.')
    parser.add_option('--output_file',
                      dest='output_file',
                      action='store',
                      help=('If specified, write output to this path instead of '
                            'writing to standard output.'))
    return parser
|
e1ec0530357ad3bebbac80c86b9d9b1010e6688c
| 3,640,991
|
def spikalize_img(experiment, image, label):
    """Convert an image into a spike train; the label passes through.

    Each pixel fires at every timestep with probability proportional to its
    brightness (pixel/255 scaled by ``experiment.max_rate``), implemented by
    thresholding uniform random numbers.

    :param experiment: config providing ``timesteps`` and ``max_rate``
    :param image: image with pixel values assumed in [0, 255] -- confirm
    :param label: returned unchanged
    :return: (spiked_image, label)
    """
    # Output shape: (timesteps, *image.shape)
    spike_shape = np.append(np.array(experiment.timesteps), np.array(image.shape))
    noise = tf.random.uniform(shape=spike_shape)
    firing_rate = image / 255 * experiment.max_rate
    spiked_img = tf.cast(firing_rate > noise, tf.float32)
    return spiked_img, label
|
7159334b40c3841977a0772ac25c71a934d268ac
| 3,640,992
|
def update_security_schemes(spec, security, login_headers, security_schemes,
                            unauthorized_schema):
    """Patch OpenAPI spec to include security schemas.
    Args:
        spec: OpenAPI spec dictionary
        security: value stored under the spec's top-level ``security`` key
        login_headers: header definitions copied onto the 200 response of
            POST /login (see commented example below)
        security_schemes: mapping stored under components.securitySchemes
        unauthorized_schema: not used in this function -- presumably
            consumed elsewhere or reserved; confirm before removing
    Returns:
        Patched spec (the same dict, mutated in place)
    """
    # login_headers = {'Set-Cookie':
    #                      {'schema':
    #                           {'type': 'string',
    #                            'example': 'session=abcde12345; Path=/; HttpOnly'}}}
    # security_schemes = {'cookieAuth': {'description': 'Session Cookie',
    #                                    'type': 'apiKey',
    #                                    'in': 'cookie',
    #                                    'name': 'session'}}
    # unauthorized_schema = {'UnauthorizedError':
    #                            {'description': "The auth cookie isn't present",
    #                             'properties':
    #                                 {'schema': {'type': 'string', 'example': 'Unauthorized'}}}}
    spec["components"]["securitySchemes"] = security_schemes
    spec["security"] = security
    # NOTE: assumes the spec already has a POST /login path with a 200
    # response entry (integer key 200).
    spec["paths"]["/login"]["post"]["responses"][200]["headers"] = login_headers.copy()
    return spec
|
1ecb5cc3a121fc151794e4e24cd4aca4bc07ce46
| 3,640,993
|
def get_geckodriver_url(version):
    """
    Generates the download URL for current platform , architecture and the given version.
    Supports Linux, MacOS and Windows.
    :param version: the version of geckodriver
    :return: Download URL for geckodriver
    """
    platform, architecture = get_platform_architecture()
    # NOTE(review): upstream geckodriver releases ship .zip archives for
    # Windows and .tar.gz for Linux/macOS; this URL hard-codes .tar.gz --
    # confirm the Windows case against get_platform_architecture().
    return f'https://github.com/mozilla/geckodriver/releases/download/{version}' \
           f'/geckodriver-{version}-{platform}{architecture}.tar.gz'
|
9d71728c551c67e86a61c3b870728bc70cad48ba
| 3,640,994
|
def get_graph_size(depth: int):
    """returns how many nodes are in fully-equipped with nodes graph of the given depth"""
    # Start with the root, then add one fully-expanded level at a time.
    total = 1
    level_nodes = 1
    known_levels = len(expand_sizes)
    for factor in expand_sizes[:depth]:
        level_nodes *= factor
        total += level_nodes
    # Beyond the explicitly listed levels, every extra level contributes a
    # constant multiple of the last known level's size.
    if known_levels < depth:
        total += level_nodes * later_expand_size * (depth - known_levels)
    return total
|
00f2a2ce714550785e99c0d193f47df05cc30b68
| 3,640,995
|
def inherits_from(obj, parent):
    """
    Takes an object and tries to determine if it inherits at *any*
    distance from parent.
    Args:
        obj (any): Object to analyze. This may be either an instance
            or a class.
        parent (any): Can be either instance, class or python path to class.
    Returns:
        inherits_from (bool): If `parent` is a parent to `obj` or not.
    Notes:
        What differs this function from e.g. `isinstance()` is that `obj`
        may be both an instance and a class, and parent may be an
        instance, a class, or the python path to a class (counting from
        the evennia root directory).
    """
    # Resolve the class whose MRO we inspect: obj itself when it is a
    # class (callable), otherwise its type.
    klass = obj if callable(obj) else obj.__class__
    ancestor_paths = ["%s.%s" % (cls.__module__, cls.__name__) for cls in klass.mro()]
    # Normalize `parent` into a single dotted-path string.
    if isinstance(parent, str):
        target_path = parent
    elif callable(parent):
        target_path = "%s.%s" % (parent.__module__, parent.__name__)
    else:
        target_path = "%s.%s" % (parent.__class__.__module__, parent.__class__.__name__)
    return target_path in ancestor_paths
|
9d7e0665b4e4fe2a3f7c136436a2502c8b72527c
| 3,640,996
|
from typing import Optional
def load_df_from_googlesheet(
    url_string: str,
    skiprows: Optional[int] = 0,
    skipfooter: Optional[int] = 0,
) -> pd.DataFrame:
    """Load a Pandas DataFrame from a google sheet.

    Rewrites a GUI-style sheet URL (ending in ``/edit?...``) into its CSV
    export form and delegates to ``load_df_from_csvfile``. The skiprows and
    skipfooter values are the number of lines to skip from the top and
    bottom of the file (see pandas read_csv).

    :param url_string: URL where the file is available
    :param skiprows: Number of lines to skip at the top of the document
    :param skipfooter: Number of lines to skip at the bottom of the document
    :return: Resulting data frame, or an Exception.
    """
    # GUI-copied URLs look like .../d/DOCID/edit?usp=sharing and must become
    # .../d/DOCID/export?format=csv[&gid=N] to be readable as CSV.
    parsed = urlparse(url_string)
    if parsed.path.endswith('/edit'):
        query = parse_qs(parsed.query)
        query['format'] = 'csv'
        fragment = parsed.fragment
        # A '#gid=N' fragment selects a sheet tab; fold it into the query.
        if 'gid=' in fragment:
            query['gid'] = fragment.split('=')[1]
            fragment = ''
        url_string = urlunparse([
            parsed.scheme,
            parsed.netloc,
            parsed.path.replace('/edit', '/export'),
            parsed.params,
            urlencode(query, doseq=True),
            fragment,
        ])
    # Fetch and parse as CSV.
    return load_df_from_csvfile(url_string, skiprows, skipfooter)
|
adfcff1968eccfa44640b5e9a7e3143703284dfb
| 3,640,997
|
def decomp(bits, dummies=default_dummies, width=default_width):
    """Translate 0s and 1s to dummies[0] and dummies[1]."""
    # Map each bit to its dummy word, join with spaces, then wrap.
    translated = ' '.join(dummies[bit] for bit in bits)
    return wrap_source(translated, width=width)
|
a6540cc90412b9e72b62c57fe6828b45ad5df593
| 3,640,998
|
def get_word_node_attrs(word: Word) -> WordNodeAttrs:
    """Create the graph's node attribute for a `Word`.

    Builds an attribute dict of the word's features. The term `Word` (not
    `Token`) follows stanza's data model, where a `Token` may span several
    words in multi-word tokens; see
    'https://stanfordnlp.github.io/stanza/data_objects.html#token'.

    Arguments:
        word: Word
            A stanza-annotated word.
    Return: WordNodeAttrs
        A dictionary of the word's features for networkx's feature graph.
    """
    # The sentence head (head == 0) gets a distinct highlight color.
    if word.head == 0:
        node_color = GraphNodeColor.HEAD.value
    else:
        node_color = GraphNodeColor.TOKEN.value
    token = word.parent
    return {
        'fname': word.sent.doc.fname,
        'start_idx': token.start_char,
        'end_idx': token.end_char,
        'text': word.text,
        'upos': word.upos,
        'lemma': word.lemma,
        'label': word.text,  # display label for PyVis
        'color': node_color  # node color for PyVis
    }
|
143a00206cfa8d98419f2d0e21e1013ea6dd02ab
| 3,640,999
|
def numpy_bbox_to_image(image, bbox_list, labels=None, scores=None, class_name=[], config=None):
    """ Numpy function used to display the bbox (target or prediction)

    :param image: float32 image of shape (H, W, C)
    :param bbox_list: bboxes in (xc, yc, w, h) form -- presumably relative
        coordinates, given the rescale call below; confirm
    :param labels: optional class id per bbox (defaults to all zeros)
    :param scores: optional confidence per bbox, appended to the label text
    :param class_name: list mapping class id -> display name
    :param config: optional config providing ``normalized_method``
    :return: the image with boxes and labels drawn onto it
    """
    # NOTE(review): the first two assert operands are identical
    # (`image.dtype == np.float32` twice) -- one was probably meant to
    # check something else; confirm the intended condition.
    assert(image.dtype == np.float32 and image.dtype == np.float32 and len(image.shape) == 3)
    # Undo input normalization so the image can be rendered.
    if config is not None and config.normalized_method == "torch_resnet":
        channel_avg = np.array([0.485, 0.456, 0.406])
        channel_std = np.array([0.229, 0.224, 0.225])
        image = (image * channel_std) + channel_avg
        image = (image*255).astype(np.uint8)
    elif config is not None and config.normalized_method == "tf_resnet":
        # Reverses channel order and rescales -- assumes BGR input in
        # [0, 255]; confirm against the tf_resnet preprocessing.
        image = image[..., ::-1]
        image = image / 255
    # Rescale (xc, yc, w, h) boxes to the image size, then to corner form.
    bbox_xcycwh = bbox.np_rescale_bbox_xcycwh(bbox_list, (image.shape[0], image.shape[1]))
    bbox_x1y1x2y2 = bbox.np_xcycwh_to_xy_min_xy_max(bbox_xcycwh)
    # Set the labels if not defined
    if labels is None:
        labels = np.zeros((bbox_x1y1x2y2.shape[0]))
    bbox_area = []
    # Compute each bbox area so larger boxes can be drawn first.
    for b in range(0, bbox_x1y1x2y2.shape[0]):
        x1, y1, x2, y2 = bbox_x1y1x2y2[b]
        bbox_area.append((x2-x1)*(y2-y1))
    # Draw boxes from largest to smallest so small ones stay visible.
    for b in np.argsort(bbox_area)[::-1]:
        # Take a new color at random for this instance.
        # NOTE(review): `instance_color` is assigned to `color` below but
        # never used for drawing (class_color is used instead) -- confirm
        # whether per-instance coloring was intended.
        instance_color = np.random.randint(0, 255, (3))
        x1, y1, x2, y2 = bbox_x1y1x2y2[b]
        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
        # Clip coordinates to the image bounds.
        x1, y1, x2, y2 = max(0, x1), max(0, y1), min(image.shape[1], x2), min(image.shape[0], y2)
        # Select the class associated with this bbox
        class_id = labels[int(b)]
        if scores is not None and len(scores) > 0:
            label_name = class_name[int(class_id)]
            label_name = "%s:%.2f" % (label_name, scores[b])
        else:
            label_name = class_name[int(class_id)]
        class_color = CLASS_COLOR_MAP[int(class_id)]
        color = instance_color
        # Scale the label background and font with the image height.
        multiplier = image.shape[0] / 500
        cv2.rectangle(image, (x1, y1), (x1 + int(multiplier*15)*len(label_name), y1 + 20), class_color.tolist(), -10)
        cv2.putText(image, label_name, (x1+2, y1 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6 * multiplier, (0, 0, 0), 1)
        cv2.rectangle(image, (x1, y1), (x2, y2), tuple(class_color.tolist()), 2)
    return image
|
c9070cb917e376357363f99e9d951d84e3274684
| 3,641,000
|
def AsyncSleep(delay, name=None):
    """Pause for `delay` seconds (which need not be an integer).
    This is an asynchronous (non-blocking) version of a sleep op. It includes
    any time spent being blocked by another thread in `delay`. If it is blocked
    for a fraction of the time specified by `delay`, it only calls `sleep`
    (actually `usleep`) only for the remainder. If it is blocked for the full
    time specified by `delay` or more, it returns without explicitly calling
    `sleep`.
    Args:
        delay: tf.Tensor which is a scalar of type float.
        name: An optional name for the op.
    Returns:
        The `delay` value.
    """
    # Thin wrapper around the custom async-sleep kernel.
    return examples_async_sleep(delay=delay, name=name)
|
c6ccb12aa7e27a28591ac5282b0e78baa68df9df
| 3,641,002
|
def palide(string, length, ellipsis="...", pad=" ", position=1.0, left=False):
    """
    A combination of `elide` and `pad`.
    """
    # The parameter `pad` shadows the module-level pad() function, so the
    # function must be fetched through globals().
    elided = elide(string, length, ellipsis=ellipsis, position=position)
    pad_func = globals()["pad"]
    return pad_func(elided, length, pad=pad, left=left)
|
be14ecb386ef7d49a6c85514bb5bee93d482be3d
| 3,641,003
|
def get_email_adderess(email_addr):
    """ Return dict from opalstack for given email address, or None """
    # Scan the account's mail list for a matching address; on a hit, fetch
    # and return that record's full details.
    for record in get_request("mail/list/")['mails']:
        if record['address'] == email_addr:
            return get_request("mail/read/{}".format(record['id']))
    return None
|
f31ae883b40da8b9ddf743744a3611dd0968e787
| 3,641,004
|
def GK3toUTM(ea, no=None, zone=32):
    """Transform Gauss-Krueger zone 3 into UTM (for backward compatibility)."""
    # Delegates to the generic converter with the GK zone fixed at 3.
    return GKtoUTM(ea, no, zone, gkzone=3)
|
aeab5433c8a676f862c9271f62a574bff2f74444
| 3,641,005
|
from typing import Optional
def get_all_predictions(
    model: nn.Module,
    dataloader: DataLoader,
    device: _Device,
    threshold_prob: Optional[float] = None,
    decouple_fn: Optional[_DecoupleFnTest] = None,
) -> _TestResult:
    """
    Make predictions on entire dataset and return raw outputs
    and optionally class predictions and probabilities if it's
    a classification model.

    Thin wrapper around `perform_one_epoch()` with phase fixed to "test";
    all other arguments are forwarded unchanged.
    See `perform_one_epoch()` for more details.
    """
    return perform_one_epoch(
        phase="test",
        model=model,
        dataloader=dataloader,
        device=device,
        threshold_prob=threshold_prob,
        decouple_fn=decouple_fn,
    )
|
e2d04376a935a1acd1d2e0645209cb865997669e
| 3,641,006
|
def plot_perf_stats(returns, factor_returns):
    """
    Compute bootstrapped performance metrics of the strategy.

    NOTE(review): despite the name and the original docstring, this
    function draws no box plot and returns the bootstrap samples (with the
    'Kurtosis' column dropped) instead of a matplotlib Axes -- confirm
    whether the plotting step was intentionally removed.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    factor_returns : pd.Series
        Daily noncumulative returns of the benchmark factor to which betas are
        computed. Usually a benchmark such as market returns.
        - This is in the same style as returns.
    Returns
    -------
    pd.DataFrame
        Bootstrapped performance statistics, one column per metric,
        excluding 'Kurtosis'.
    """
    bootstrap_values = timeseries.perf_stats_bootstrap(returns,
                                                       factor_returns,
                                                       return_stats=False)
    bootstrap_values = bootstrap_values.drop('Kurtosis', axis='columns')
    return bootstrap_values
|
f948c29df18bc93d08fd6ad1b366db310b4bf8c2
| 3,641,007
|
def create(user):
    """
    Create a new user in the database from the passed-in user data.

    :param user: dict with "username" and "postalcode" keys
    :return: 201 response on success; aborts with 400 on bad postal code,
        406 if the user already exists
    """
    username = user.get("username", None)
    postalcode = user.get("postalcode", None)
    cityname = _get_cityname(postalcode)
    # Does the user already exist?
    existing_master = Master.query.filter(Master.username == username).one_or_none()
    # A postal code that Geonames cannot resolve takes precedence.
    if cityname is None:
        abort(
            400, "Postal code {postalcode} is invalid".format(postalcode=postalcode),
        )
    # Duplicate usernames are not acceptable.
    if existing_master is not None:
        abort(
            406,
            "User with username {username} already exists".format(username=username),
        )
    # Create master and detail rows, then persist both in a single commit.
    db.session.add(Master(username=username))
    db.session.add(Detail(postalcode=postalcode, cityname=cityname))
    db.session.commit()
    return make_response(
        "{username} successfully created".format(username=username), 201,
    )
|
2b3a64211fd66c7fbe93f51aecbceab6675f5b99
| 3,641,008
|
def show_profile(uid):
    """
    Return serializable users data
    :param uid: user id to look up
    :return String: (JSON)
    """
    # Look up the user, then serialize to a JSON response.
    user_record = get_user_by_id(uid)
    return jsonify(user_record.serialize)
|
cb8bc7934575b99b6098a911c4dc7ad9fb1b7a48
| 3,641,009
|
def degree_correlation(coeffs_1, coeffs_2):
    """
    Correlation per spherical harmonic degree between two models 1 and 2.
    Parameters
    ----------
    coeffs_1, coeffs_2 : ndarray, shape (N,)
        Two sets of coefficients of equal length `N`.
    Returns
    -------
    C_n : ndarray, shape (nmax,)
        Degree correlation of the two models. There are `N = nmax(nmax+2)`
        coefficients.
    Raises
    ------
    ValueError
        If either input is not 1-D or the lengths differ.
    """
    if coeffs_1.ndim != 1:
        raise ValueError(f'Only 1-D input allowed {coeffs_1.ndim} != 1')
    if coeffs_2.ndim != 1:
        raise ValueError(f'Only 1-D input allowed {coeffs_2.ndim} != 1')
    if coeffs_1.size != coeffs_2.size:
        raise ValueError(
            'Number of coefficients is '
            'not equal ({0} != {1}).'.format(coeffs_1.size, coeffs_2.size))
    # N = nmax*(nmax+2)  =>  nmax = sqrt(N+1) - 1
    nmax = int(np.sqrt(coeffs_1.size + 1) - 1)
    C_n = np.zeros((nmax,))
    R_n = np.zeros((nmax,))  # elements are prop. to power spectrum of coeffs_1
    S_n = np.zeros((nmax,))  # elements are prop. to power spectrum of coeffs_2
    coeffs_12 = coeffs_1*coeffs_2
    for n in range(1, nmax+1):
        # Degree-n coefficients occupy indices [n**2 - 1, n**2 - 1 + 2n + 1).
        # (Renamed from min/max, which shadowed the builtins.)
        lo = n**2 - 1
        hi = lo + (2*n + 1)
        R_n[n-1] = np.sum(coeffs_1[lo:hi]**2)
        S_n[n-1] = np.sum(coeffs_2[lo:hi]**2)
        C_n[n-1] = (np.sum(coeffs_12[lo:hi]) / np.sqrt(R_n[n-1]*S_n[n-1]))
    return C_n
|
10dea06b6e1f9a1c4202f3478523fe7bdcc8ab6e
| 3,641,010
|
import cdr_cleaner.args_parser as parser
def parse_args():
    """
    Add file_path to the default cdr_cleaner.args_parser argument list
    :return: an expanded argument list object
    """
    # Describe the extra -f/--file_path argument, then let the shared
    # parser combine it with its defaults.
    file_path_arg = {
        parser.SHORT_ARGUMENT: '-f',
        parser.LONG_ARGUMENT: '--file_path',
        parser.ACTION: 'store',
        parser.DEST: 'file_path',
        parser.HELP: ('path to csv file (with header row) containing pids '
                      'whose observation records are to be removed'),
        parser.REQUIRED: True,
    }
    return parser.default_parse_args([file_path_arg])
|
f5f89a55799ceee801b06a51e902cdd252068e50
| 3,641,011
|
def name(model):
    """A repeatable way to get the formatted model name."""
    # Strip underscores from the class name, then lowercase it.
    cleaned = model.__name__.replace('_', '')
    return cleaned.lower()
|
3d9ca275bfbfff6d734f49a47459761c559d906e
| 3,641,012
|
from typing import List
from typing import Optional
def render_fields(
    fields: List[Field], instance_name: Optional[str] = None
) -> List[str]:
    """Renders fields to string.
    Arguments:
        fields:
            The fields to render.
        instance_name:
            The name of model instance for which the fields are written.
            If given, automatically insert the value for FK fields.
            This assumes that the FK variables are defined before this class
            and follow the convention `column_name1_column_name2_...`.
    Required fields come first; nullable (optional) fields are appended
    after them.
    """
    required = []
    optional = []
    for field in fields:
        rendered = render_field(field, instance_name=instance_name)
        (optional if field.null else required).append(rendered)
    return required + optional
|
ba75827eac0ccea3e68259e27274feea17121cb2
| 3,641,013
|
def get_primary_tasks_for_service(service_arn):
    """Get the task ARN of the primary service"""
    # Describe the service and look for its PRIMARY deployment; return the
    # tasks of that deployment's task definition, or None if absent.
    described = ecs.describe_services(cluster=cluster, services=[service_arn])
    for deployment in described['services'][0]['deployments']:
        if deployment['status'] != 'PRIMARY':
            continue
        return get_tasks_for_task_definition(deployment['taskDefinition'])
    return None
|
113e3ab3a20646d20caf08a6e1dfc4d546d2d950
| 3,641,014
|
def load_data(csv_file):
    """
    @type csv_file: string
    @param csv_file: path to csv file
    Loads data from specified csv file and drops its Month column
    @rtype: pandas.DataFrame
    @return: DataFrame from csv file without Month column
    """
    # drop(columns=...) replaces the positional-axis form drop('Month', 1),
    # which was deprecated in pandas 1.0 and removed in pandas 2.0.
    return pd.read_csv(csv_file).drop(columns='Month')
|
5a458c104d763e431f0faf63a98bf4a59fd7902c
| 3,641,016
|
def nearest(array, value):
    """
    Find the index of the array element closest to value
    Args:
        array (array): array to be tested
        value (float): value to be tested
    Returns:
        int: index
    """
    # Index of the minimum absolute difference.
    distances = np.abs(array - value)
    return distances.argmin()
|
5197d0cae968557b519d1fa4025d2b834d7065c5
| 3,641,017
|
def fine_tune_model(trainX: np.ndarray, trainy: np.ndarray, cv: int = 5) -> SVC:
    """Grid-search SVC hyperparameters on the training set and return the
    best model, already refitted on the full training data.

    Args:
        trainX (np.ndarray): train array containg embedding images.
        trainy (np.ndarray): train array containg labels.
        cv (int, optional): Number of folds to apply in cross validation.
            Defaults to 5.
    Returns:
        SVC: Trained model.
    """
    search_space = {
        'C': [0.1, 1, 10, 100, 1000],
        'gamma': ['auto', 'scale'],
        'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
        'probability': [True],
    }
    # refit=True retrains the best configuration on all of trainX/trainy.
    search = GridSearchCV(SVC(), search_space, refit=True, verbose=1,
                          return_train_score=True, cv=cv)
    search.fit(trainX, trainy)
    return search.best_estimator_
|
d493a0b0023f57116858878163b81463c1a7166e
| 3,641,018
|
from datetime import datetime
import numpy
def get_empty_array_year(year=None, start_end=True, variable_list=None, variable_list_dtype=None, record_interval='HH'):
    """
    Allocates and returns new empty record array for given year using list of dtypes
    (or variable labels as 8byte floats if no dtype list provided) for variables plus
    TIMESTAMP_START and TIMESTAMP_END at beginning

    :param year: year to be represented in array (current year if not provided)
    :type year: int
    :param start_end: if True, uses TIMESTAMP_START and TIMESTAMP_END, if not, uses only TIMESTAMP (end)
    :type start_end: bool
    :param variable_list: list of strings to be used as variable labels (assumed f8 type);
                          defaults to ['TEST'] if not provided
    :type variable_list: list (of str)
    :param variable_list_dtype: list of dtype tuples (label, data type) to be used as variables
    :type variable_list_dtype: list (of (str, str)-tuples)
    :param record_interval: resolution to be used for record ['HR' for hourly, 'HH' for half-hourly (default)]
    :type record_interval: str
    :raises ONEFluxError: if record_interval is not 'HH' or 'HR'
    """
    # Resolve defaults at call time. The previous signature evaluated
    # datetime.now().year once at import (freezing the "current" year for
    # the life of the process) and used a shared mutable list default.
    if year is None:
        year = datetime.now().year
    if variable_list is None:
        variable_list = ['TEST']
    # record_interval determines the timestamp step
    if record_interval.lower() == 'hh':
        step = timedelta(minutes=30)
    elif record_interval.lower() == 'hr':
        step = timedelta(minutes=60)
    else:
        msg = 'Unknown record_interval: {r}'.format(r=record_interval)
        log.critical(msg)
        raise ONEFluxError(msg)
    # Build the timestamp grid covering the whole year; the extra trailing
    # entry provides the END timestamp of the last interval.
    timestamp_list = []
    current_timestamp = datetime(int(year), 1, 1, 0, 0, 0)
    while current_timestamp.year < int(year) + 1:
        timestamp_list.append(current_timestamp)
        current_timestamp += step
    timestamp_list.append(current_timestamp)
    timestamp_list_begin = timestamp_list[:-1]
    timestamp_list_end = timestamp_list[1:]
    # Assemble the record dtype: timestamp column(s) first, then variables
    # (f8 unless an explicit dtype list was given).
    dtype = ([(var, 'f8') for var in variable_list] if variable_list_dtype is None else variable_list_dtype)
    if start_end:
        dtype = [('TIMESTAMP_START', 'a25'), ('TIMESTAMP_END', 'a25')] + dtype
    else:
        dtype = [('TIMESTAMP', 'a25'), ] + dtype
    # Allocate and fill all variables with the -9999 missing-data marker.
    data = numpy.zeros(len(timestamp_list_begin), dtype=dtype)
    data[:] = -9999.0
    if start_end:
        data['TIMESTAMP_START'][:] = [i.strftime('%Y%m%d%H%M') for i in timestamp_list_begin]
        data['TIMESTAMP_END'][:] = [i.strftime('%Y%m%d%H%M') for i in timestamp_list_end]
    else:
        data['TIMESTAMP'][:] = [i.strftime('%Y%m%d%H%M') for i in timestamp_list_end]
    return data
|
d703ddc41233125b2b426c0423b0a3dcb85f73a0
| 3,641,019
|
def validate_geojson(data):
    """
    Validate geojson
    """
    # Must be a dict carrying a list under 'features' before attempting
    # full FeatureCollection validation.
    if not isinstance(data, dict):
        return False
    features = data.get('features')
    if not isinstance(features, list):
        return False
    collection = geojson.FeatureCollection([geojson.Feature(f) for f in features])
    return collection.is_valid
|
c48dfb76ff6d0255299f3913a644edff679b1a1a
| 3,641,020
|
from pathlib import Path
import time
def run_wps(conn, config_wpsprocess, **kwargs):
    """
    primary function to orchestrate running the wps job from submission to download (if required)
    Parameters:
    -----------
    conn: dict,
        Connection parameters
        Example: conn = {'domain': 'https://earthobs.defra.gov.uk',
                         'username': '<insert-username>',
                         'access_token': '<insert-access-token>'}
    config_wpsprocess: list or dict,
        list of dictionaries for individual wps submission requests.
        users can generate a list of multiple dictionaries, one dict per wps job
        with "xml_config", this is dict of variables that templated into the xml
        payload for the WPS request submission
        Example:
            config_wpsprocess = [{'template_xml':'gsdownload_template.xml',
                                  'xml_config':{
                                      'template_layer_name':lyr,
                                      'template_outputformat':'image/tiff',
                                      'template_mimetype':'application/zip'},
                                  'dl_bool':True
                                  }]
    output_dir: str or Pathlib object, optional,
        user specified output directory
    verify: str, optional:
        add custom path to any organisation certificate stores that the
        environment needs
        Default Value:
            * True
        Possible Value:
            * 'dir/dir/cert.file'
    Returns:
    -----------
    execution_dict: dict,
        job-state dictionary (status, log file path, total duration, ...).
        NOTE(review): the original docstring claimed a list of download
        paths was returned; the code below actually returns
        ``execution_dict`` -- and implicitly None when submission fails,
        since the except branch only prints. Confirm callers handle both.
    """
    # set output path if not specified
    if 'output_dir' not in kwargs:
        kwargs['output_dir'] = Path.cwd()
    if 'verify' not in kwargs:
        kwargs['verify'] = True
    # set the request config dictionary
    request_config = {
        'wps_server': conn['domain'] + '/geoserver/ows',
        'access_token': conn['access_token'],
        'headers': {'Content-type': 'application/xml', 'User-Agent': 'python'},
        'verify': kwargs['verify']
    }
    # submit wps jobs
    try:
        execution_dict = submit_wps_queue(request_config, config_wpsprocess)
    except Exception as error:
        # Best-effort reporting only; falls through and returns None.
        print(error.args)
        print('The WPS submission has failed')
    else:
        # INITIALISE VARIABLES and drop the wps log file if it exists
        path_output = make_output_dir(kwargs['output_dir'])
        # keep calling the wps job status until 'continue_process' = False,
        # polling every 15 seconds
        while True:
            execution_dict = poll_api_status(execution_dict, request_config, path_output)
            if execution_dict['continue_process']:
                time.sleep(15)
            else:
                break
        # after download is complete, process downloaded files (eg renames and extracting zips)
        if execution_dict['job_status'] == 'DOWNLOAD-SUCCESSFUL':
            execution_dict = process_wps_downloaded_files(execution_dict)
        # set log file and job duration (in minutes) in dict
        execution_dict['log_file_path'] = path_output / 'wps-log.csv'
        execution_dict['total_job_duration'] = (execution_dict['timestamp_job_end'] - execution_dict['timestamp_job_start']).total_seconds() / 60
        return execution_dict
|
d629939aaa32399a52a2f8ed1b0c8b5e94206f29
| 3,641,021
|
from chat.models import Chat
from ct.models import Role
def get_redirect_url(user):
    """
    Analyse user and redirect:
    Instructor:
        onboarding is disabled - to /ctms/
        onboarding is enabled and not achieved needed percent - to /ctms/onboarding/
        onboarding is enabled and achieved needed percent - to /ctms/
    Student:
        Depends on type of chat student took part of and redirect to:
        /lms/courses/<course_id> or /lms/tester/courses/<course_pk>
        If user doesn't have any chat:
        look at user's role and get lms type whether from invite or course of role
    Arguments:
        user (obj): User model of django.contrib.auth.models
    Return:
        redirect_url (str)
    """
    if not user:
        return
    # Instructors: onboarding page until it is sufficiently complete (only
    # when the waffle switch is on), otherwise their course list.
    if getattr(user, 'instructor', None):
        if waffle.switch_is_active('ctms_onboarding_enabled') and \
                get_onboarding_percentage(user.id) < settings.ONBOARDING_PERCENTAGE_DONE:
            return reverse('ctms:onboarding')
        return reverse('ctms:my_courses')
    # Students: the most recent chat decides between the tester and the
    # regular LMS course view.
    latest_chat = Chat.objects.filter(user=user).order_by('-timestamp').first()
    if latest_chat:
        prefix = 'tester_' if latest_chat.is_test else ''
        course = latest_chat.enroll_code.courseUnit.course
        return reverse(
            'lms:{}course_view'.format(prefix),
            kwargs={'course_id': course.id}
        )
    # No chat: fall back to the latest enrolled/self-study role; a joined
    # tester invite selects the tester view.
    role = user.role_set.filter(role__in=[Role.ENROLLED, Role.SELFSTUDY]).last()
    if role:
        last_invite = role.course.invite_set.filter(status='joined', user=user, type='tester').last()
        prefix = 'tester_' if last_invite else ''
        return reverse(
            'lms:{}course_view'.format(prefix),
            kwargs={'course_id': role.course.id}
        )
    # Default landing page.
    return reverse('ct:home')
|
28faf82e8b22eb3602ba70e00a97a97ae50d93a1
| 3,641,022
|
def _ds_to_arrraylist(
    ds, bands, time_dim, x_dim, y_dim, percentile_stretch, image_proc_func=None
):
    """
    Converts an xarray dataset to a list of numpy arrays for plt.imshow plotting.

    Returns:
        (array_list, p_low, p_high): one image array per timestep, plus the
        low/high percentile values used for the contrast stretch.
    """
    # Compute percents
    # Percentiles are computed once over ALL bands and timesteps so every
    # frame is stretched consistently.
    p_low, p_high = ds[bands].to_array().quantile(percentile_stretch).values
    array_list = []
    for i, timestep in enumerate(ds[time_dim]):
        # Select single timestep from the data array
        ds_i = ds[{time_dim: i}]
        # Get shape of array
        x = len(ds[x_dim])
        y = len(ds[y_dim])
        if len(bands) == 1:
            # Create new one band array
            # NOTE(review): out_range="image" keeps the output in the input
            # image's own value range (skimage semantics) -- confirm this is
            # intended, vs. the (0, 1.0) range used in the RGB branch below.
            img_toshow = exposure.rescale_intensity(
                ds_i[bands[0]].values, in_range=(
                    p_low, p_high), out_range="image"
            )
        else:
            # Create new three band array
            rawimg = np.zeros((y, x, 3), dtype=np.float32)
            # Add xarray bands into three dimensional numpy array
            for band, colour in enumerate(bands):
                rawimg[:, :, band] = ds_i[colour].values
            # Stretch contrast using percentile values
            img_toshow = exposure.rescale_intensity(
                rawimg, in_range=(p_low, p_high), out_range=(0, 1.0)
            )
        # Optionally image processing; clip keeps values displayable in [0, 1].
        if image_proc_func:
            img_toshow = image_proc_func(img_toshow).clip(0, 1)
        array_list.append(img_toshow)
    return array_list, p_low, p_high
|
85b05277cf874a32eb006045437eee8ae02ce0ef
| 3,641,023
|
import six
import binascii
def derive_key(secret, salt, iterations=1000, keylen=32):
    """
    Compute a derived cryptographic key from a password according to PBKDF2.

    .. seealso:: http://en.wikipedia.org/wiki/PBKDF2

    :param secret: The secret.
    :type secret: bytes or unicode
    :param salt: The salt to be used.
    :type salt: bytes or unicode
    :param iterations: Number of iterations of derivation algorithm to run.
    :type iterations: int
    :param keylen: Length of the key to derive in bytes.
    :type keylen: int

    :return: The derived key in Base64 encoding.
    :rtype: bytes
    """
    # Strict type validation (exact types, no subclasses, mirroring the
    # original contract).
    text_or_bytes = [six.text_type, six.binary_type]
    assert(type(secret) in text_or_bytes)
    assert(type(salt) in text_or_bytes)
    assert(type(iterations) in six.integer_types)
    assert(type(keylen) in six.integer_types)

    def _utf8(value):
        # Normalize text inputs to UTF-8 bytes; bytes pass through untouched.
        return value.encode('utf8') if type(value) == six.text_type else value

    raw_key = pbkdf2(_utf8(secret), _utf8(salt), iterations, keylen)
    return binascii.b2a_base64(raw_key).strip()
|
04adaf71f3f9cf94e602029a242fedd037a40187
| 3,641,024
|
import torch
def calc_params_l2_norm(model: torch.nn.Module, bf16: bool):
    """Calculate l2 norm of parameters.

    Args:
        model: a module, or a list of modules, whose parameters are normed.
        bf16: when True, parameter data is upcast to fp32 before norming
            (presumably for numerical accuracy -- confirm).

    Returns:
        float: the global l2 norm summed across model-parallel ranks.
    """
    # args = get_args()
    if not isinstance(model, list):
        model = [model]
    # Remove duplicate params.
    params_data = []
    for model_ in model:
        for param in model_.parameters():
            # Count each parameter exactly once: skip parameters flagged as
            # shared and tensor-parallel duplicates.
            is_not_shared = param_is_not_shared(param)
            is_not_tp_duplicate = parallel_state.param_is_not_tensor_parallel_duplicate(param)
            if is_not_shared and is_not_tp_duplicate:
                if bf16:
                    params_data.append(param.data.float())
                else:
                    params_data.append(param.data)
    # Calculate norm (fused multi-tensor kernel from apex's amp_C).
    dummy_overflow_buf = torch.cuda.IntTensor([0])
    norm, _ = multi_tensor_applier(
        amp_C.multi_tensor_l2norm, dummy_overflow_buf, [params_data], False # no per-parameter norm
    )
    # Square so the all-reduce can sum squared norms across ranks.
    norm_2 = norm * norm
    # Sum across all model-parallel GPUs.
    torch.distributed.all_reduce(
        norm_2, op=torch.distributed.ReduceOp.SUM, group=parallel_state.get_model_parallel_group()
    )
    # sqrt of the summed squares yields the global l2 norm.
    return norm_2.item() ** 0.5
|
399fc47296ac1ba3398dd5be834358f5ef50c9a4
| 3,641,026
|
def gradient_descent(f, init_val_dict, learning_rate=0.001, max_iter=1000, stop_stepsize=1e-6, return_history=False):
    """
    Gradient Descent finding the minimum of a single expression.

    INPUTS
    =======
    f: expression object exposing ``gradient_at(val_dict) -> ndarray``
    init_val_dict: dictionary containing initial value of variables
    learning_rate: the step size between iterations
    max_iter: maximum iteration before the algorithm stops
    stop_stepsize: tolerance; iteration stops once the l2 norm of the
        step falls below this value
    return_history: default False. If True, return the trajectory of the
        algorithm (list of points, starting point included) instead of
        the final variable dict

    RETURNS
    ========
    If return_history is False: dict mapping variable name -> value at
    the minimum found. If True: the trajectory as a list of lists.
    """
    # BUGFIX: the original computed f.gradient_at(init_val_dict) here and
    # immediately discarded the result (it was recomputed inside the loop
    # before first use), doubling the cost of the first gradient evaluation.
    variables = [var for var in init_val_dict.keys()]
    curr_point = np.array([v for k, v in init_val_dict.items()])
    history = [curr_point.tolist()]
    for i in range(max_iter):
        prev_point = curr_point
        prev_val_dict = {var: val for var, val in zip(variables, prev_point)}
        # Steepest-descent step: x <- x - lr * grad f(x)
        f_grad = f.gradient_at(prev_val_dict)
        curr_point = curr_point - learning_rate * f_grad
        history.append(curr_point.tolist())
        # Stop once the step size is below tolerance.
        if np.linalg.norm(curr_point - prev_point, ord=2) < stop_stepsize:
            break
    if return_history:
        return history
    return {var: val for var, val in zip(variables, curr_point)}
|
ba9edd1b41b7ac8e2e1d14b0a2958b7fe07bcf2a
| 3,641,027
|
from typing import Dict
from typing import Any
def gcp_iam_organization_role_permission_remove_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Remove permissions from custom organization role.

    Args:
        client (Client): GCP API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    # Delegates to the shared helper, wiring in the organization-scoped
    # get/update endpoints so the same logic can serve different role scopes.
    return remove_custom_role_permissions(client_request_get_method=client.gcp_iam_organization_role_get_request,
                                          client_request_update_method=client.gcp_iam_organization_role_update_request,
                                          args=args)
|
42f56361d36b4fa0fbdea09031923235a0a3eb47
| 3,641,028
|
import socket
def get_hostname(ipv) -> str:
    """
    Resolve the primary hostname for an IPv4 or IPv6 address.

    :param ipv: ip address
    :return: hostname
    """
    # gethostbyaddr returns (hostname, aliaslist, ipaddrlist); only the
    # canonical hostname is of interest here.
    hostname, _aliases, _addresses = socket.gethostbyaddr(ipv)
    return hostname
|
e7d660dc3c5e30def646e56fa628099e997145be
| 3,641,029
|
import torch
def load_model():
    """
    Load CLIP model into memory.

    Will download the model from the internet if it's not found in `WAGTAIL_CLIP_DOWNLOAD_PATH`.

    Returns:
        (model, device, preprocess): the ViT-B/32 CLIP model, the torch
        device it was loaded onto, and its input preprocessing transform.
    """
    # Inference is pinned to CPU here.
    device = torch.device("cpu")
    # DOWNLOAD_PATH is a module-level constant -- presumably derived from the
    # WAGTAIL_CLIP_DOWNLOAD_PATH setting mentioned above (confirm).
    model, preprocess = clip.load("ViT-B/32", device, download_root=DOWNLOAD_PATH)
    return model, device, preprocess
|
deebe0a5d5edb82b34d386771367ab65aaf6cb4b
| 3,641,030
|
from traceback import format_exception
def exception_response(ex: Exception):
    """Generate JSON payload from ApiException or Exception object.

    ApiException-like objects (detected via an `.ApiException` attribute)
    supply their own HTTP code and dict payload; any other Exception is
    reported as a 500 with its traceback. Falsy or non-Exception arguments
    produce a diagnostic 500 payload instead of raising.
    """
    if not ex:
        app.logger.error("Function received argument: None!")
        return __make_response(
            500,
            {
                "error"     : "Unknown",
                "details"   : "api.exception_response() received: None!"
            }
        )
    #
    try:
        if isinstance(ex, Exception):
            # Member variable '.ApiException' reveals the type
            if getattr(ex, 'ApiException', None):
                app.logger.error(
                    "ApiException: '{}'"
                    .format(str(ex))
                )
                response_code = ex.code
                response_payload = ex.to_dict()
            else:
                # Unexpected error, log trace by using logger.exception()
                app.logger.exception(str(ex))
                e = format_exception(type(ex), ex, ex.__traceback__)
                response_payload = {
                    "error"     : e[-1],
                    "trace"     : "".join(e[1:-1])
                }
                response_code = 500
            return __make_response(response_code, response_payload)
        else:
            # Truthy but not an Exception instance.
            return __make_response(
                500,
                {
                    # BUGFIX: was misspelled "Uknown".
                    "error"     : "Unknown",
                    "details"   : "api.exception_response() received unsupported argument",
                    # BUGFIX: raw type objects are not JSON serializable;
                    # stringify so the payload can actually be emitted.
                    "type"      : str(type(ex))
                }
            )
    except Exception:
        # Last-resort guard so the error handler itself never raises.
        app.logger.exception("api.exception_response(): Internal Error!")
        return __make_response(
            500,
            {
                "error"     : "Internal Error",
                "details"   : "api.exception_response() internal failure!"
            }
        )
|
d3b8d58d3214d3543cea5135a46455fc824b78d7
| 3,641,031
|
def check(pack, inst):
    """
    Check whether an instruction is present in the packet.

    Input:
        - pack: The packet to be checked
        - inst: The instruction

    Output:
        True if the instruction is present in the packet, else False
    """
    # Keys are derived from the instruction's opcode; falsy keys are
    # skipped, and the first matching slot short-circuits the scan.
    return any(pack[slot] == inst
               for slot in getPacketKey(inst[0]) if slot)
|
905fb2061b2fd5c129cdb0903ef84184c55844af
| 3,641,032
|
def get_score(true, predicted):
    """Return the F1 score for a single instance.

    Args:
        true: array-like (supporting .tolist()) of gold labels.
        predicted: array-like (supporting .tolist()) of predicted labels.

    Returns:
        float: 2pr/(p+r); 0.0 when either input is empty or there is no
        overlap between the label sets.
    """
    # ROBUSTNESS: the original raised ZeroDivisionError on empty inputs.
    if len(true) == 0 or len(predicted) == 0:
        return 0.
    numerator = len(set(predicted.tolist()).intersection(set(true.tolist())))
    p = numerator / float(len(predicted))
    r = numerator / float(len(true))
    # No overlap -> both p and r are 0; avoid 0/0 below.
    if r == 0.:
        return 0.
    return 2 * p * r / float(p + r)
|
115a4847e3d991f47415554401df25d72d74bb2f
| 3,641,034
|
def check_ratio_argv(_argv):
    """Return True when images are to be searched by the same ratio (optional argument)."""
    # The ratio flag always occupies the final two positions of argv, so
    # only _argv[-2] and _argv[-1] need inspecting.
    ratio_flags = ARGV["search by ratio"]
    return _argv[-2] in ratio_flags and _argv[-1] in ratio_flags
|
23a677af4042fcc3616a25378af4e7721971de56
| 3,641,035
|
def binary_erosion(input, structure = None, iterations = 1, mask = None,
        output = None, border_value = 0, origin = 0, brute_force = False):
    """Multi-dimensional binary erosion with the given structure.

    An output array can optionally be provided. The origin parameter
    controls the placement of the filter. If no structuring element is
    provided an element is generated with a squared connectivity equal
    to one. The border_value parameter gives the value of the array
    outside the border. The erosion operation is repeated iterations
    times. If iterations is less than 1, the erosion is repeated until
    the result does not change anymore. If a mask is given, only those
    elements with a true value at the corresponding mask element are
    modified at each iteration.
    """
    # Delegates to the shared erosion/dilation worker; the hard-coded 0
    # selects the erosion operation (a sibling dilation wrapper presumably
    # passes a different flag -- confirm against _binary_erosion).
    return _binary_erosion(input, structure, iterations, mask,
                           output, border_value, origin, 0, brute_force)
|
06de7142d7eca9ca3f5712f76318215950f4c710
| 3,641,036
|
def chord_to_freq_ratios(chord):
    """Return the frequency ratios of the pitches in <chord>.

    Args:
        chord (tuple of ints): see <get_consonance_score>.

    Returns:
        tuple(list of ints, int): the per-pitch numerators and their shared
        denominator; pitch i has frequency ratio numerators[i]/denominator.
        (The original docstring said "list of ints", but a 2-tuple is
        returned.)
    """
    # JI_NUMS / JI_DENOMS are module-level just-intonation ratio tables
    # indexed by scale degree (defined elsewhere in this module).
    numerators = [JI_NUMS[i] for i in chord]
    denoms = [JI_DENOMS[i] for i in chord]
    # Rescale every ratio onto the least common multiple of the individual
    # denominators so all pitches share one denominator.
    denominator = get_lcm(denoms)
    numerators = [(numerators[i] * denominator) // denoms[i] for i in \
        range(len(numerators))]
    return numerators, denominator
|
4811a6b69e6fd646adf5dc7e7a31a23be8fa6708
| 3,641,037
|
import torch
def proto_factor_cosine(local_proto, global_proto):
    """
    Per-class cosine similarity between local and global prototypes.

    Both inputs are [C, D] (D is 64 or 4); the result is a [C] tensor of
    cosine factors.
    """
    # cos = <a, b> / (||a|| * ||b|| + eps), computed row-wise.
    dot = (local_proto * global_proto).sum(dim=-1)
    denom = torch.norm(local_proto, dim=-1) * torch.norm(global_proto, dim=-1)
    # The small epsilon guards against division by zero for zero prototypes.
    return dot / (denom + 1e-6)
|
6e9f7540ec1339efe3961b103633f5175cb38c49
| 3,641,038
|
def urlparse(d, keys=None):
    """Return a copy of *d* in which the values at *keys* are URL-parsed.

    When *keys* is None, every value in the dictionary is parsed.
    """
    parsed = d.copy()
    # Default to all keys of the (copied) mapping.
    target_keys = parsed.keys() if keys is None else keys
    for key in target_keys:
        parsed[key] = _urlparse(parsed[key])
    return parsed
|
91cd40ef294443431a772ec14ef4aa54dab34ea8
| 3,641,039
|
import numpy
import math
def doFDR(pvalues,
          vlambda=numpy.arange(0,0.95,0.05),
          pi0_method="smoother",
          fdr_level=None,
          robust=False,
          smooth_df = 3,
          smooth_log_pi0 = False):
    """modeled after code taken from http://genomics.princeton.edu/storeylab/qvalue/linux.html.
    I did not like the error handling so I translated most to python.

    Compute FDR after method by Storey et al. (2002).

    Args:
        pvalues: sequence of p-values, each in [0, 1].
        vlambda: grid of lambda values for estimating pi0 (or a single value).
        pi0_method: "smoother" (spline over lambda) or "bootstrap".
        fdr_level: optional level in (0, 1]; when given, mPassed flags
            q-values at or below it.
        robust: use the robust q-value estimator for small p-values.
        smooth_df: degrees of freedom of the smoothing spline.
        smooth_log_pi0: smooth log(pi0) instead of pi0 itself.

    Returns:
        FDRResult with mQValues, mPassed, mPValues, mPi0 and mLambda set.

    Requires a module-level rpy2-style ``R`` bridge and ``FDRResult``.
    """
    if min(pvalues) < 0 or max(pvalues) > 1:
        raise ValueError( "p-values out of range" )
    if len(vlambda) > 1 and len(vlambda) < 4:
        raise ValueError(" If length of vlambda greater than 1, you need at least 4 values." )
    if len(vlambda) > 1 and (min(vlambda) < 0 or max(vlambda) >= 1):
        raise ValueError( "vlambda must be within [0, 1).")
    m = len(pvalues)
    # these next few functions are the various ways to estimate pi0
    if len(vlambda)==1:
        vlambda = vlambda[0]
        if vlambda < 0 or vlambda >=1 :
            raise ValueError( "vlambda must be within [0, 1).")
        # Single-lambda estimate: fraction of p-values above lambda, scaled.
        pi0 = numpy.mean( [ x >= vlambda for x in pvalues ] ) / (1.0 - vlambda)
        pi0 = min(pi0, 1.0)
        R.assign( "pi0", pi0)
    else:
        # BUGFIX: numpy.float was a deprecated alias of the builtin float
        # and was removed in NumPy 1.24; use float directly.
        pi0 = numpy.zeros( len(vlambda), float )
        for i in range( len(vlambda) ):
            pi0[i] = numpy.mean( [x >= vlambda[i] for x in pvalues ]) / (1.0 -vlambda[i] )
        R.assign( "pi0", pi0)
        R.assign( "vlambda", vlambda)
        if pi0_method=="smoother":
            if smooth_log_pi0:
                pi0 = math.log(pi0)
            R.assign( "smooth_df", smooth_df)
            # Smooth pi0 over lambda and take the value at the largest lambda.
            spi0 = R("""spi0 <- smooth.spline(vlambda,pi0, df = smooth_df)""")
            pi0 = R("""pi0 <- predict( spi0, x = max(vlambda) )$y""")
            if smooth_log_pi0:
                pi0 = math.exp(pi0)
        elif pi0_method=="bootstrap":
            minpi0 = min(pi0)
            # BUGFIX: numpy.float -> float (removed NumPy alias), see above.
            mse = numpy.zeros( len(vlambda), float )
            pi0_boot = numpy.zeros( len(vlambda), float )
            R.assign( "pvalues", pvalues)
            # Bootstrap: pick the lambda with the smallest mean squared error.
            pi0 = R("""
        m <- length(pvalues)
        minpi0 <- min(pi0)
        mse <- rep(0,length(vlambda))
        pi0_boot <- rep(0,length(vlambda))
        for(i in 1:100)
        {
            pvalues_boot <- sample(pvalues,size=m,replace=TRUE)
            for(i in 1:length(vlambda))
            {
                pi0_boot[i] <- mean(pvalues_boot>vlambda[i])/(1-vlambda[i])
            }
            mse <- mse + (pi0_boot-minpi0)^2
        }
        pi0 <- min(pi0[mse==min(mse)])""")
        else:
            raise ValueError( "'pi0_method' must be one of 'smoother' or 'bootstrap'.")
        pi0 = min(pi0,1.0)
        R.assign( "pi0", pi0 )
    if pi0 <= 0:
        raise ValueError( "The estimated pi0 <= 0. Check that you have valid p-values or use another vlambda method." )
    # IDIOM: identity comparison with None.
    if fdr_level is not None and (fdr_level <= 0 or fdr_level > 1):
        raise ValueError( "'fdr_level' must be within (0, 1].")
    # The estimated q-values calculated here
    #u = numpy.argsort( p )
    # change by Alan
    # ranking function which returns number of observations less than or equal
    R.assign( "pvalues", pvalues )
    R.assign( "robust", robust )
    qvalues = R("""u <- order(pvalues)
    qvalues.rank <- function(x)
    {
        idx <- sort.list(x)
        fc <- factor(x)
        nl <- length(levels(fc))
        bin <- as.integer(fc)
        tbl <- tabulate(bin)
        cs <- cumsum(tbl)
        tbl <- rep(cs, tbl)
        tbl[idx] <- tbl
        return(tbl)
    }
    v <- qvalues.rank(pvalues)
    m <- length(pvalues)
    qvalues <- pi0 * m * pvalues / v
    if(robust)
    {
        qvalues <- pi0*m*pvalues/(v*(1-(1-pvalues)^m))
    }
    qvalues[u[m]] <- min(qvalues[u[m]],1)
    for(i in (m-1):1)
    {
        qvalues[u[i]] <- min(qvalues[u[i]],qvalues[u[i+1]],1)
    }
    qvalues
    """)
    result = FDRResult()
    result.mQValues = qvalues
    if fdr_level is not None:
        result.mPassed = [ x <= fdr_level for x in result.mQValues ]
    else:
        result.mPassed = [ False for x in result.mQValues ]
    result.mPValues = pvalues
    result.mPi0 = pi0
    result.mLambda = vlambda
    return result
|
17919d989ca07b4fb87930141cef3ce392b66ad4
| 3,641,040
|
def sequence(ini, end, step=1):
    """ Create a sequence from ini to end by step. Similar to
    ee.List.sequence, but if end != last item then adds the end to the end
    of the resulting list
    """
    end = ee.Number(end)
    # Guard against an invalid/degenerate step.
    if step == 0:
        step = 1
    amplitude = end.subtract(ini)
    # When the span is not an exact multiple of step, ee.List.sequence stops
    # short of `end`, so append `end` explicitly in that case.
    mod = ee.Number(amplitude).mod(step)
    seq = ee.List.sequence(ini, end, step)
    condition = mod.neq(0)
    # Server-side conditional: both branches stay as Earth Engine objects.
    final = ee.Algorithms.If(condition, seq.add(end), seq)
    return ee.List(final)
|
cca23fd00ddf1237a95a53b7f6a3f1bc264f84da
| 3,641,041
|
def kBET_single(
    matrix,
    batch,
    k0=10,
    knn=None,
    verbose=False
):
    """
    Run a single kBET batch-mixing estimation via the R `kBET` package.

    params:
        matrix: expression matrix (at the moment: a PCA matrix, so do.pca is set to FALSE)
        batch: series or list of batch assignments
        k0: neighbourhood size passed to kBET
        knn: precomputed k-nearest-neighbour graph passed to kBET
        verbose: print progress messages
    returns:
        kBET observed rejection rate (np.nan when the R call fails)
    """
    # Bridge numpy/pandas objects into R via anndata2ri for the duration
    # of the call.
    anndata2ri.activate()
    ro.r("library(kBET)")
    if verbose:
        print("importing expression matrix")
    ro.globalenv['data_mtrx'] = matrix
    ro.globalenv['batch'] = batch
    if verbose:
        print("kBET estimation")
    ro.globalenv['knn_graph'] = knn
    ro.globalenv['k0'] = k0
    # heuristic/adapt/pca are all disabled: the input is assumed to be an
    # already-reduced matrix with a fixed neighbourhood size.
    ro.r(
        "batch.estimate <- kBET("
        "  data_mtrx,"
        "  batch,"
        "  knn=knn_graph,"
        "  k0=k0,"
        "  plot=FALSE,"
        "  do.pca=FALSE,"
        "  heuristic=FALSE,"
        "  adapt=FALSE,"
        f"  verbose={str(verbose).upper()}"
        ")"
    )
    try:
        score = ro.r("batch.estimate$summary$kBET.observed")[0]
    except rpy2.rinterface_lib.embedded.RRuntimeError:
        # kBET can fail (e.g. too few cells per batch); report NaN instead
        # of propagating the R error.
        score = np.nan
    anndata2ri.deactivate()
    return score
|
42ecfb9dee65806a25e92764ea7c1ef54316be02
| 3,641,042
|
from typing import Tuple
import numpy
def get_eye_center_position(face: Face) -> Tuple[numpy.int64, numpy.int64]:
    """Get the center position between the eyes of the given face.

    Args:
        face (:class:`~.types.Face`):
            The face to extract the center position from.

    Returns:
        Tuple[:data:`numpy.int64`, :data:`numpy.int64`]:
            The position directly between the eyes of the face
    """
    left_eye, right_eye = get_eye_positions(face)
    # Midpoint of the two eye positions, component-wise (integer division
    # keeps numpy integer coordinates).
    first = (left_eye[0] + right_eye[0]) // 2
    second = (left_eye[1] + right_eye[1]) // 2
    return first, second
|
562dca1971996e8d7497d146aa7ccefcb3ce8006
| 3,641,043
|
def esc_quotes(strng):
    """ Return the input string with single and double quotes escaped out.
    """
    # One-pass character translation instead of chained replace() calls.
    table = str.maketrans({'"': '\\"', "'": "\\'"})
    return strng.translate(table)
|
25956257e06901d4f59088dd2c17ddd5ea620407
| 3,641,044
|
def gen_fake_game_data():
    """Creates an example Game object (Braves at Nationals, 2014-04-04)."""
    # `Game` and `parser` (dateutil-style timestamp parsing) come from this
    # module's imports; the values mirror MLB gameday feed fields.
    game = Game(
        gameday_id='2014/04/04/atlmlb-wasmlb-1',
        venue='Nationals Park',
        start_time=parser.parse('2014-04-04T13:05:00-0400'),
        game_data_directory='/components/game/mlb/year_2014/month_04/day_04/gid_2014_04_04_atlmlb_wasmlb_1',
        home_name_abbrev='WSH',
        home_team_city='Washington',
        home_team_name='Nationals',
        away_name_abbrev='ATL',
        away_team_city='Atlanta',
        away_team_name='Braves',
        home_team_runs=1,
        away_team_runs=2
    )
    return game
|
415c85eb0ba4e03c135ab56791177ebb634ea5e3
| 3,641,045
|
def unsafe_load_all(stream):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.

    Resolve all tags, even those known to be
    unsafe on untrusted input.
    """
    # SECURITY: UnsafeLoader can construct arbitrary Python objects; never
    # feed this function untrusted input.
    return load_all(stream, UnsafeLoader)
|
f6307614e776b221ec22b063680f34f5e2ddf789
| 3,641,046
|
def jaccard(set1, set2):
    """
    Compute the Jaccard coefficient |A ∩ B| / |A ∪ B| of two sets.

    @param set1: first set
    @param set2: second set
    @return: the jaccard coefficient (0 when either set is empty)
    """
    # An empty operand makes the coefficient 0 by convention (and avoids
    # dividing by zero when both are empty).
    if not set1 or not set2:
        return 0
    overlap = len(set1.intersection(set2))
    # |A ∪ B| via inclusion-exclusion: |A| + |B| - |A ∩ B|.
    return overlap / (len(set1) + len(set2) - overlap)
|
9a99c6c5251bdb7cb10f6d3088ac6ac52bb02a55
| 3,641,049
|
def to_numeric(arg):
    """
    Convert a string to int or float; return non-string values unchanged.

    This is important, because e.g. {"!==": [{"+": "0"}, 0.0]} must compare
    numerically rather than as strings.

    Raises:
        ValueError: if *arg* is a string that is not a valid number.
    """
    if isinstance(arg, str):
        # GENERALIZATION: also treat exponent notation ("1e5", "2E-3") as
        # float; the original only checked for '.', so such strings raised
        # ValueError from int(). Plain integer strings still become int.
        if '.' in arg or 'e' in arg or 'E' in arg:
            return float(arg)
        else:
            return int(arg)
    return arg
|
e82746e1c5c84b57e59086030ff7b1e93c89a8ec
| 3,641,053
|
def get_kde_polyfit_estimator(samples, N=100000, bandwidth=200, maxlength=150000, points=500, degree=50):
    """Return a polynomial approximation of the KDE estimator.

    (Translated from the original Japanese docstring: "returns the
    polynomial-fitted version; returns both just in case" -- i.e. a
    2-tuple of (polynomial approximation, raw KDE estimator).)
    """
    # Build the raw kernel-density estimator first.
    f = get_kde_estimator(samples, N, bandwidth)
    # Sample the KDE on [1, maxlength] and fit a degree-`degree` polynomial.
    x = np.linspace(1, maxlength, points)
    z = np.polyfit(x, f(x), degree)
    # Beyond maxlength the polynomial is clamped to its value at maxlength,
    # avoiding wild extrapolation of a high-degree fit.
    return (lambda x: np.where(x<=maxlength, np.poly1d(z)(x), np.poly1d(z)(maxlength))), f
|
72801a8be25826ef42ab700e5cae742ed59fcea4
| 3,641,054
|
def read_tracker(file_name):
    """Read and return the integer stored on the first line of *file_name*."""
    with open(file_name, "r") as tracker_file:
        first_line = tracker_file.readline()
    return int(first_line)
|
9d1f43b8f833b5ca86c247760ae79e18f33aa019
| 3,641,055
|
def MixR2VaporPress(qv, p):
    """Return Vapor Pressure given Mixing Ratio and Pressure.

    INPUTS
    qv (kg kg^-1) Water vapor mixing ratio
    p (Pa) Ambient pressure

    RETURNS
    e (Pa) Water vapor pressure
    """
    # e = qv * p / (Epsilon + qv). `Epsilon` is a module-level constant --
    # presumably the ratio of dry-air to water-vapor gas constants (~0.622);
    # confirm where it is defined.
    return qv * p / (Epsilon + qv)
|
71d7ee564d07292ef4831bf39e54f51071b7d7dd
| 3,641,056
|
def sigmoid_derivative(dA, cache):
    """
    Implement the backward propagation for a single SIGMOID unit.

    Arguments:
    dA -- post-activation gradient, of any shape
    cache -- 'Z' where we store for computing backward propagation efficiently

    Returns:
    dZ -- Gradient of the cost with respect to Z
    """
    Z = cache
    # sigmoid(Z); keeps the original numerics exactly.
    # NOTE(review): the 1e-8 shifts the *exponent* (not the denominator),
    # which slightly biases the result -- confirm that is intentional rather
    # than a misplaced division guard.
    s = 1/(1+np.exp(-Z+1e-8))
    # CLEANUP: removed a large commented-out alternative implementation that
    # was left behind as a dead bare-string expression.
    # Chain rule: d(sigmoid)/dZ = s * (1 - s).
    dZ = dA * s * (1-s)
    assert (dZ.shape == Z.shape)
    return dZ
|
6b45e722bc48b4cf60170de267c235d0943d022c
| 3,641,057
|
from typing import Counter
def part2(lines, rounds=100):
    """
    >>> data = load_example(__file__, '24')
    >>> part2(data, 0)
    10
    >>> part2(data, 1)
    15
    >>> part2(data, 2)
    12
    >>> part2(data, 3)
    25
    >>> part2(data, 4)
    14
    >>> part2(data, 5)
    23
    >>> part2(data, 6)
    28
    >>> part2(data, 7)
    41
    >>> part2(data, 8)
    37
    >>> part2(data, 9)
    49
    >>> part2(data, 10)
    37
    >>> part2(data, 20)
    132
    >>> part2(data, 30)
    259
    >>> part2(data, 40)
    406
    >>> part2(data, 50)
    566
    >>> part2(data, 60)
    788
    >>> part2(data, 70)
    1106
    >>> part2(data, 80)
    1373
    >>> part2(data, 90)
    1844
    >>> part2(data, 100)
    2208
    """
    # Each input line resolves to a tile endpoint; tiles referenced an odd
    # number of times end up flipped (kept), even counts cancel out.
    endpoints = prepare_endpoints(lines)
    real_endpoints = {ep: True for ep, count in Counter(endpoints).items() if count % 2 == 1}
    # Run the cellular-automaton style simulation for the requested rounds.
    return simulate(real_endpoints, rounds)
|
def binary_search(
    items,
    target_key,
    target_key_hi=None,
    key=None,
    lo=None,
    hi=None,
    target=Target.any,
):
    """
    Search for a target key using binary search and return (found?,
    index / range).

    The returned index / range is as follows according to the desired
    target:

    * Target.lo: lo
    * Target.hi: hi
    * Target.any: Any `x` such that `lo <= x < hi`
    * Target.range: (lo, hi)

    Where:

    * `lo` is the smallest index s.t. `target_key <= key(items[lo])`
    * `hi` is the smallest index s.t. `target_key_hi < key(items[hi])`

    Thus, the slice of items matching the target key(s) is `[lo:hi]`.

    Arguments:

    * items: Indexable such that its keys are sorted.
    * target_key: What to search for.  Keys must be orderable.
    * key: Key function taking arguments (index, item) that returns the
      sort key for the item at the given index.  (This allows one to
      have a separate array of keys.)  If `None`, items are their own
      keys.
    * lo: Initial lower bound index (inclusive)
    * hi: Initial upper bound index (exclusive)
    * target: What in the items to target: existence, low index, high
      index, or the whole range.  See `Target`.
    * target_key_hi: If searching for a range, search for target keys k
      in `target_key <= k < target_key_hi`.  (Ignored otherwise.)
    """
    if target == Target.range:
        # A single-key range search: default the upper key to the lower one.
        if target_key_hi is None:
            target_key_hi = target_key
        # First search locates the low edge; the extra outputs (hi_le,
        # hi_gt) appear to bracket the high edge so the second search can
        # start from a narrowed interval -- confirm against _binary_search.
        _, lo_idx, hi_le, hi_gt = _binary_search(
            items, target_key, target_key_hi, key, lo, hi, Target.lo)
        _, hi_idx, _, _ = _binary_search(
            items, target_key_hi, None, key, hi_le, hi_gt, Target.hi)
        # Found iff the matching slice [lo_idx:hi_idx] is non-empty.
        return (lo_idx < hi_idx, (lo_idx, hi_idx))
    else:
        found, idx, _, _ = _binary_search(
            items, target_key, None, key, lo, hi, target)
        return (found, idx)
|
c0539322c0a2dd9cd3b53fa65bf6ecf28840546e
| 3,641,059
|
def func_real_dirty_gauss(dirty_beam):
    """Returns a parameteric model for the map of a point source,
    consisting of the interpolated dirty beam along the y-axis
    and a sinusoid with gaussian envelope along the x-axis.

    This function is a wrapper that defines the interpolated
    dirty beam.

    Parameters
    ----------
    dirty_beam : scipy.interpolate.interp1d
        Interpolation function that takes as an argument el = sin(za)
        and outputs an np.ndarray[nel, nra] that represents the dirty
        beam evaluated at the same right ascension as the map.

    Returns
    -------
    real_dirty_gauss : np.ndarray[nra*ndec]
        Model prediction for the map of the point source.
    """
    # The closure captures `dirty_beam` so the returned callable matches the
    # (coord, *params) signature expected by curve-fit style optimizers.
    def real_dirty_gauss(
        coord, peak_amplitude, centroid_x, centroid_y, fwhm_x, offset, fringe_rate
    ):
        """Returns a parameteric model for the map of a point source,
        consisting of the interpolated dirty beam along the y-axis
        and a sinusoid with gaussian envelope along the x-axis.

        Parameter
        ---------
        coord : [ra, dec]
            Tuple containing the right ascension and declination, each
            of which is coordinate vectors of length nra and ndec, respectively.
        peak_amplitude : float
            Model parameter.  Normalization of the gaussian
            in the right ascension direction.
        centroid_x : float
            Model parameter.  Centroid of the gaussian in degrees in the
            right ascension direction.
        centroid_y : float
            Model parameter.  Centroid of the dirty beam in degrees in the
            declination direction.
        fwhm_x : float
            Model parameter.  Full width at half maximum of the gaussian
            in degrees in the right ascension direction.
        offset : float
            Model parameter.  Constant background value of the map.
        fringe_rate : float
            Model parameter.  Frequency of the sinusoid.

        Returns
        -------
        model : np.ndarray[nra*ndec]
            Model prediction for the map of the point source.
        """
        x, y = coord
        # Gaussian envelope in RA (the -4 ln2 factor converts FWHM to the
        # gaussian exponent) times the dirty beam evaluated in declination.
        model = (
            peak_amplitude
            * np.exp(
                -4.0 * np.log(2.0) * ((x[:, np.newaxis] - centroid_x) / fwhm_x) ** 2
            )
            * dirty_beam(y - _dec_to_el(centroid_y))
        ) + offset
        # Complex fringe along RA; only its real part contributes below.
        phase = np.exp(
            2.0j
            * np.pi
            * np.cos(np.radians(centroid_y))
            * np.sin(-np.radians(x - centroid_x))
            * fringe_rate
        )
        # Flatten to 1-D as required by the fitting interface.
        return (model * phase[:, np.newaxis]).real.ravel()

    return real_dirty_gauss
|
2a0da2176f67a9cd9dd31af48b987f7f3d9d8342
| 3,641,060
|
def shortest_path(graph, a_node, b_node):
    """ code by Eryk Kopczynski

    Breadth-first search from a_node; returns the flattened path to
    b_node (BFS yields a shortest path in an unweighted graph).
    """
    # FIFO frontier of nodes still to expand.
    front = deque()
    front.append(a_node)
    came_from = {a_node: [a_node]}
    while front:
        cp = front.popleft()
        for np in graph.neighbors(cp):
            if np not in came_from:
                front.append(np)
                # Stores the whole predecessor chain as nested lists
                # ([path-so-far, node]), which is why flatten() is needed
                # below.
                came_from[np] = [came_from[cp], np]
    """flatten added by Bruce Wernick. This is purely cosmetic and not ideal.
    It looks like the came_from dict is storing unnecessary information!
    """
    # NOTE(review): if b_node is unreachable, came_from.get() is None and
    # the result depends on how flatten() handles None -- verify.
    return flatten(came_from.get(b_node))
| 3,641,061
|
def rank_compute(prediction, att_plt, key, byte):
    """
    Compute the evolution of the correct key byte's rank as traces accumulate.

    - prediction : predictions of the NN, shape (n_traces, n_hypotheses)
    - att_plt : plaintext of the attack traces
    - key : Key used during encryption
    - byte : byte to attack

    Returns:
        np.ndarray of length n_traces; entry i is the rank of the correct
        key after accumulating traces 0..i.
    """
    (nb_trs, nb_hyp) = prediction.shape
    # CLEANUP: removed unused locals idx_min / min_rk from the original.
    key_log_prob = np.zeros(nb_hyp)
    rank_evol = np.full(nb_trs, 255)
    # Work in log space; the 1e-40 floor avoids log(0).
    prediction = np.log(prediction + 1e-40)
    for i in range(nb_trs):
        for k in range(nb_hyp):
            # Accumulate each key hypothesis' log-likelihood: the model's
            # score for the S-box output implied by hypothesis k and the
            # trace's plaintext byte.
            key_log_prob[k] += prediction[i, AES_Sbox[k ^ att_plt[i, byte]]]
        rank_evol[i] = rk_key(key_log_prob, key)
    return rank_evol
|
a21a5fbf5db64cab1fe87ed37c4a9770f5ccd9f8
| 3,641,062
|
def purelin(n):
    """
    Linear (identity) transfer function: returns its input unchanged.
    """
    return n
|
493c4ae481702194fe32eec44e589e5d15614b99
| 3,641,063
|
def arrayDimension(inputArray):
    """Returns the dimension of a list-formatted array.

    The dimension of the array is defined as the number of nested lists.
    """
    # arraySize (defined elsewhere in this module) is expected to return one
    # size entry per nesting level, so its length is the dimensionality.
    return len(arraySize(inputArray))
|
4d98b76ce6f5aeae9a8171918a78c31ef75dd33e
| 3,641,064
|
from datetime import datetime
def process_data(records, root) -> bool:
    """Creates the xml file that will be imported in pure.

    Returns False as soon as a record fails the creation-date check,
    True when all records were processed (or skipped).
    """
    for record in records:
        item_metadata = record["metadata"]
        # If the rdm record has a uuid means that it was imported from pure - REVIEW
        if "uuid" in item_metadata:
            continue
        # Checks if the record was created today
        # NOTE(review): this is a plain string comparison against today's
        # "YYYY-MM-DD".  A bare date string created today compares <= and
        # aborts; only values sorting strictly after today's date string
        # (e.g. full timestamps like "YYYY-MM-DDTHH:MM") pass -- confirm
        # that is the intended cutoff.
        if record["created"] <= datetime.today().strftime("%Y-%m-%d"):
            return False
        # Adds fields to the created xml element
        populate_xml(item_metadata, record, root)
    return True
|
3e78792c2f147cb56ac502d783fb2a1e3346be53
| 3,641,065
|
def hopcroft(G, S):
    """Hopcroft's algorithm for computing state equivalence.

    Parameters
    ----------
    G : fully deterministic graph
    S : iterable
        one half of the initial (bi)partition

    Returns
    -------
    Partition
    """
    sigma = alphabet(G)
    partition = Partition(list(G))
    # Initial bipartition: S vs. its complement.
    p1, p2 = partition.split(S)[0]
    # Seed the waiting set with the smaller half for every symbol
    # (the classic "process the smaller half" trick).
    smaller = partition.select_smaller(p1, p2)
    wait_set = set()
    for a in sigma:
        wait_set.add((smaller, a))
    while wait_set:
        p, a = wait_set.pop()
        # States with an a-labelled edge into block p: the splitter set.
        inv_a_p = G.in_edges(partition.parts[p], data="label")
        inv_a_p = (p for (p, q, label) in inv_a_p if label == a)
        for (p1, p2) in partition.split(inv_a_p):
            for b in sigma:
                if (p1, b) in wait_set:
                    # Both halves must be processed if the old block was
                    # already pending.
                    wait_set.add((p2, b))
                else:
                    # Otherwise enqueue only the smaller half.
                    smaller = partition.select_smaller(p1, p2)
                    wait_set.add((smaller, b))
    return partition
|
a841dc0a77bb82a27937b20a7335c399ff9b53f5
| 3,641,066
|
import logging
def TVRegDiff(data, itern, alph, u0=None, scale='small', ep=1e-6, dx=None,
plotflag=_has_matplotlib, diagflag=True, precondflag=True,
diffkernel='abs', cgtol=1e-4, cgmaxit=100):
"""
Estimate derivatives from noisy data based using the Total
Variation Regularized Numerical Differentiation (TVDiff)
algorithm.
Parameters
----------
data : ndarray
One-dimensional array containing series data to be
differentiated.
itern : int
Number of iterations to run the main loop. A stopping
condition based on the norm of the gradient vector g
below would be an easy modification. No default value.
alph : float
Regularization parameter. This is the main parameter
to fiddle with. Start by varying by orders of
magnitude until reasonable results are obtained. A
value to the nearest power of 10 is usally adequate.
No default value. Higher values increase
regularization strenght and improve conditioning.
u0 : ndarray, optional
Initialization of the iteration. Default value is the
naive derivative (without scaling), of appropriate
length (this being different for the two methods).
Although the solution is theoretically independent of
the initialization, a poor choice can exacerbate
conditioning issues when the linear system is solved.
scale : {large' or 'small' (case insensitive)}, str, optional
Default is 'small'. 'small' has somewhat better boundary
behavior, but becomes unwieldly for data larger than
1000 entries or so. 'large' has simpler numerics but
is more efficient for large-scale problems. 'large' is
more readily modified for higher-order derivatives,
since the implicit differentiation matrix is square.
ep : float, optional
Parameter for avoiding division by zero. Default value
is 1e-6. Results should not be very sensitive to the
value. Larger values improve conditioning and
therefore speed, while smaller values give more
accurate results with sharper jumps.
dx : float, optional
Grid spacing, used in the definition of the derivative
operators. Default is the reciprocal of the data size.
plotflag : bool, optional
Flag whether to display plot at each iteration.
Default is True. Useful, but adds significant
running time.
diagflag : bool, optional
Flag whether to display diagnostics at each
iteration. Default is True. Useful for diagnosing
preconditioning problems. When tolerance is not met,
an early iterate being best is more worrying than a
large relative residual.
precondflag: bool, optional
Flag whether to use a preconditioner for conjugate gradient solution.
Default is True. While in principle it should speed things up,
sometimes the preconditioner can cause convergence problems instead,
and should be turned off. Note that this mostly makes sense for 'small'
scale problems; for 'large' ones, the improved preconditioner is one
of the main features of the algorithms and turning it off defeats the
point.
diffkernel: str, optional
Kernel to use in the integral to smooth the derivative. By default it's
the absolute value, |u'| (value: "abs"). However, it can be changed to
being the square, (u')^2 (value: "sq"). The latter produces smoother
derivatives, whereas the absolute values tends to make them more blocky.
Default is abs.
cgtol: float, optional
Tolerance to use in conjugate gradient optimisation. Default is 1e-4.
cgmaxit: int, optional
Maximum number of iterations to use in conjugate gradient optimisation.
Default is 100
Returns
-------
u : ndarray
Estimate of the regularized derivative of data. Due to
different grid assumptions, length(u) = length(data) + 1
if scale = 'small', otherwise length(u) = length(data).
"""
# Make sure we have a column vector
data = np.array(data)
assert len(data.shape) == 1, "data is not one-dimensional"
# Get the data size.
n = len(data)
# Default checking. (u0 is done separately within each method.)
if dx is None:
dx = 1.0 / n
# Different methods for small- and large-scale problems.
if (scale.lower() == 'small'):
# Differentiation operator
d0 = -np.ones(n)/dx
du = np.ones(n-1)/dx
dl = np.zeros(n-1)
dl[-1] = d0[-1]
d0[-1] *= -1
D = sparse.diags([dl, d0, du], [-1, 0, 1])
DT = D.transpose()
# Antidifferentiation and its adjoint
def A(x): return (np.cumsum(x) - 0.5 * (x + x[0])) * dx
def AT(x): return np.concatenate([[sum(x[1:])/2.0],
(sum(x)-np.cumsum(x)+0.5*x)[1:]])*dx
# Default initialization is naive derivative
if u0 is None:
u0 = D*data
u = u0.copy()
# Since Au( 0 ) = 0, we need to adjust.
ofst = data[0]
# Precompute.
ATb = AT(ofst - data) # input: size n
# Main loop.
for ii in range(1, itern+1):
if diffkernel == 'abs':
# Diagonal matrix of weights, for linearizing E-L equation.
Q = sparse.spdiags(1. / (np.sqrt((D * u)**2 + ep)), 0, n, n)
# Linearized diffusion matrix, also approximation of Hessian.
L = dx * DT * Q * D
elif diffkernel == 'sq':
L = dx * DT * D
else:
raise ValueError('Invalid diffkernel value')
# Gradient of functional.
g = AT(A(u)) + ATb + alph * L * u
#print(g)
# Prepare to solve linear equation.
if precondflag:
# Simple preconditioner.
P = alph * sparse.spdiags(L.diagonal() + 1, 0, n, n)
else:
P = None
def linop(v): return (alph * L * v + AT(A(v)))
linop = splin.LinearOperator((n, n), linop)
s, info_i = sparse.linalg.cg(
linop, g, x0=None, tol=cgtol, maxiter=cgmaxit,
callback=None, M=P, atol='legacy')
#print(s)
if diagflag:
log_iteration(ii, s[0], u, g)
if (info_i > 0):
logging.warning(
"WARNING - convergence to tolerance not achieved!")
elif (info_i < 0):
logging.warning("WARNING - illegal input or breakdown")
# Update solution.
u = u - s
#print(u)
# # Test the convergence condition
# s_norm = np.sqrt(np.sum(np.array(s).ravel() ** 2))
# u_norm = np.sqrt(np.sum(np.array(u).ravel() ** 2))
# norm = s_norm / u_norm
# print(norm)
# Display plot.
if plotflag:
plt.plot(u)
plt.show()
elif (scale.lower() == 'large'):
# Construct anti-differentiation operator and its adjoint.
def A(v): return np.cumsum(v)
def AT(w): return (sum(w) * np.ones(len(w)) -
np.transpose(np.concatenate(([0.0],
np.cumsum(w[:-1])))))
# Construct differentiation matrix.
c = np.ones(n)
D = sparse.spdiags([-c, c], [0, 1], n, n) / dx
mask = np.ones((n, n))
mask[-1, -1] = 0.0
D = sparse.dia_matrix(D.multiply(mask))
DT = D.transpose()
# Since Au( 0 ) = 0, we need to adjust.
data = data - data[0]
# Default initialization is naive derivative.
if u0 is None:
u0 = np.concatenate(([0], np.diff(data)))
u = u0
# Precompute.
ATd = AT(data)
# Main loop.
for ii in range(1, itern + 1):
if diffkernel == 'abs':
# Diagonal matrix of weights, for linearizing E-L equation.
Q = sparse.spdiags(1. / (np.sqrt((D * u)**2 + ep)), 0, n, n)
# Linearized diffusion matrix, also approximation of Hessian.
L = DT * Q * D
elif diffkernel == 'sq':
L = DT * D
else:
raise ValueError('Invalid diffkernel value')
# Gradient of functional.
g = AT(A(u)) - ATd
g = g + alph * L * u
# Build preconditioner.
if precondflag:
c = np.cumsum(range(n, 0, -1))
B = alph * L + sparse.spdiags(c[::-1], 0, n, n)
# droptol = 1.0e-2
R = sparse.dia_matrix(np.linalg.cholesky(B.todense()))
P = np.dot(R.transpose(), R)
else:
P = None
# Prepare to solve linear equation.
def linop(v): return (alph * L * v + AT(A(v)))
linop = splin.LinearOperator((n, n), linop)
s, info_i = sparse.linalg.cg(
linop, -g, x0=None, tol=cgtol, maxiter=cgmaxit, callback=None,
M=P, atol='legacy')
if diagflag:
log_iteration(ii, s[0], u, g)
if (info_i > 0):
logging.warning(
"WARNING - convergence to tolerance not achieved!")
elif (info_i < 0):
logging.warning("WARNING - illegal input or breakdown")
# Update current solution
u = u + s
# Display plot
if plotflag:
plt.plot(u / dx)
plt.show()
u = u / dx
return u
|
b4497f4ac6d09f5240f551f6d25077d2e7624af2
| 3,641,067
|
def replaceidlcode(lines,mjd,day=None):
    """
    Replace IDL code in lines (array of strings) with the results of code
    execution.  This is a small helper function for translate_idl_mjd5_script().

    Parameters
    ----------
    lines : array of str
        Script lines to translate.  NOTE(review): the vectorized
        .lower()/.find()/.startswith()/.replace() calls suggest this is a
        numpy chararray-like object rather than a plain list -- confirm
        with the caller.
    mjd : int or str
        MJD of the night.  Currently unused in this function body.
    day : int, optional
        Day number; literal occurrences of 'day' in the lines are replaced
        with this value.

    Returns
    -------
    lines : array of str
        The translated lines (modified in place and returned).
    """
    # Replace the 'day' variable with its numeric value, e.g.
    # day
    #  psfid=day+138
    #  domeid=day+134
    # but leave the 'day=' assignment line itself untouched.
    if day is not None:
        ind,nind = dln.where( (lines.lower().find('day')>-1) &
                              (lines.lower().startswith('day=')==False) )
        if nind>0:
            lines[ind] = lines[ind].replace('day',str(day))
    # Translate IDL indgen() to numpy, e.g.
    #  ims=day+149+indgen(2)
    ind,nind = dln.where(lines.lower().find('indgen(')>-1)
    if nind>0:
        lines[ind] = lines[ind].replace('indgen(','np.arange(')
    # Deal with assignment lines with code to execute
    # (arithmetic or np.arange on the right-hand side, but not mkplan calls)
    ind,nind = dln.where( ((lines.lower().find('+')>-1) |
                           (lines.lower().find('-')>-1) |
                           (lines.lower().find('*')>-1) |
                           (lines.lower().find('np.arange')>-1)) &
                          (lines.lower().find('=')>-1) &
                          (lines.lower().find('mkplan')==-1) )
    for i in range(nind):
        line1 = lines[ind[i]]
        lo = line1.find('=')
        key = line1[0:lo]
        # WARNING: eval() executes arbitrary expressions from the script;
        # only use this on trusted input files.
        val = eval(line1[lo+1:])
        # Scalars are written back as-is; array results as a Python list.
        if (type(val) is int) | (type(val) is str):
            lines[ind[i]] = key+'='+str(val)
        else:
            lines[ind[i]] = key+'='+str(list(val))
    # Deal with mkplan lines with code to execute
    ind,nind = dln.where( ((lines.lower().find('+')>-1) |
                           (lines.lower().find('-')>-1) |
                           (lines.lower().find('*')>-1) |
                           (lines.lower().find('np.arange')>-1)) &
                          (lines.lower().find('=')>-1) &
                          (lines.lower().find('mkplan')>-1) )
    for i in range(nind):
        line1 = lines[ind[i]]
        raise ValueError('This has not been implemented yet')
    return lines
|
fd8a2bc9a374c36e7973cfc01d38582e25ce9438
| 3,641,068
|
import torch
import tqdm
def test(
    model: nn.Module,
    classes: dict,
    data_loader: torch.utils.data.DataLoader,
    criterion: nn.Module,
    # scheduler: nn.Module,
    epoch: int,
    num_iteration: int,
    use_cuda: bool,
    tensorboard_writer: torch.utils.tensorboard.SummaryWriter,
    name_step: str,
):
    """Evaluate ``model`` on ``data_loader`` and log metrics to tensorboard.

    Args:
        model (nn.Module): model to evaluate (switched to eval mode here).
        classes (dict): mapping of class names to indices, used by the
            confusion matrix.
        data_loader (torch.utils.data.DataLoader): evaluation data.
        criterion (nn.Module): loss function.
        epoch (int): current training epoch (for the progress-bar label).
        num_iteration (int): global step used as the tensorboard x-axis.
        use_cuda (bool): move batches to GPU when True.
        tensorboard_writer (torch.utils.tensorboard.SummaryWriter): metric sink.
        name_step (str): label shown in the progress-bar description.

    Returns:
        tuple: (loss, accuracy_top1, accuracy_top5, confidence_mean) averages.
    """
    model.eval()

    # Running meters for loss, accuracies and raw top-1 confidence.
    meter_loss = utils.MetricTracker()
    meter_top1 = utils.MetricTracker()
    meter_top5 = utils.MetricTracker()
    meter_conf = utils.MetricTracker()
    cmatrix = utils.ConfusionMatrix(classes)

    # Prefetch batches in a background thread; wrap in a progress bar.
    progress = tqdm(BackgroundGenerator(data_loader, max_prefetch=32), total=len(data_loader))
    for inputs, targets in progress:
        if use_cuda:
            inputs = inputs.cuda()
            targets = targets.cuda()

        outputs = model(inputs)
        batch_loss = criterion(outputs, targets)
        confidence, prediction = outputs.topk(dim=1, k=5)

        # Accumulate batch statistics.
        n = targets.numel()
        meter_loss.update(batch_loss.item())
        meter_top1.update((prediction[:, 0] == targets).sum().item(), n)
        meter_top5.update((prediction[:, :5] == targets[:, None]).sum().item(), n)
        meter_conf.update(confidence[:, 0].sum().item(), n)
        cmatrix.update_confusion_matrix(targets.cpu(), prediction[:, 0].cpu())

        progress.set_description(f"Epoch {epoch + 1}/{args.epochs} {name_step}")
        progress.set_postfix(
            loss=f"{meter_loss.average:05.5f}",
            accuracy_top1=f"{100 * meter_top1.average:05.2f}",
            accuracy_top5=f"{100 * meter_top5.average:05.2f}",)

    # Publish the epoch-level averages.
    tensorboard_writer.add_scalar("loss", meter_loss.average, num_iteration)
    tensorboard_writer.add_scalar("accuracy_top1", meter_top1.average, num_iteration)
    tensorboard_writer.add_scalar("accuracy_top5", meter_top5.average, num_iteration)
    tensorboard_writer.add_scalar(
        "confidence_mean", meter_conf.average, num_iteration
    )
    tensorboard_writer.add_figure("confusion_matrix", cmatrix.plot_confusion_matrix(normalize=True), num_iteration)
    tensorboard_writer.flush()

    return (
        meter_loss.average,
        meter_top1.average,
        meter_top5.average,
        meter_conf.average,
    )
|
306b43730554492d1d541be1a8c8d4c202b932f4
| 3,641,069
|
import torch
def get_grad_spherical_harmonics(xyz, l, m):
    """Compute the gradient of the real spherical harmonics of the AO.

    Args:
        xyz : tensor (Nbatch, Nelec, Nrbf, Ndim) x,y,z, distance component
            of each point from each RBF center
        l : tensor (Nrbf,) l quantum number per RBF
        m : tensor (Nrbf,) m quantum number per RBF

    Returns:
        Y tensor (Nbatch, Nelec, Nrbf, 3): value of each grad SH at each point
    """
    Y = torch.zeros_like(xyz)

    # l = 0: single closed form, no m dependence.
    ind = (l == 0).nonzero().view(-1)
    Y[:, :, ind, :] = _grad_spherical_harmonics_l0(xyz[:, :, ind, :])

    # l = 1 and l = 2: dispatch per m value (same pattern for both).
    for lval, mvals, grad_fn in (
            (1, (-1, 0, 1), _grad_spherical_harmonics_l1),
            (2, (-2, -1, 0, 1, 2), _grad_spherical_harmonics_l2)):
        indl = (l == lval)
        if torch.any(indl):
            for mval in mvals:
                ind = (indl * (m == mval)).nonzero().view(-1)
                # BUGFIX: was `if len(ind > 0):`, which measures the length of
                # the boolean tensor (always == len(ind)); the intended check
                # is a non-empty index set.
                if len(ind) > 0:
                    Y[:, :, ind, :] = grad_fn(xyz[:, :, ind, :], mval)
    return Y
|
a01b529f98276e41fa3ed8c9934db770979d8702
| 3,641,070
|
from typing import List
from typing import Union
import re
def references_from_string(string: str) -> List[
    Union[InputReference, TaskReference, ItemReference]
]:
    """Generate reference objects from a reference string.

    Arguments:
        string {str} -- A reference string (eg: `{{input.example}}`)

    Raises:
        AssertionError: A reference has the wrong number of dotted parts
            for its type.
        ValueError: Input string contains an unrecognized reference type.

    Returns:
        List[Union[InputReference, TaskReference, ItemReference]] -- A list of
        reference objects.
    """
    pattern = r"{{\s*([_a-zA-Z0-9.\-\$#\?]*)\s*}}"
    match = re.findall(pattern, string, flags=re.MULTILINE)

    refs = []
    for ref in match:
        split_ref = ref.split('.')
        ref_type = split_ref[0]
        if ref_type == 'input':
            assert len(split_ref) == 2, \
                f'Input Reference must be formatted as "input.variable" not {ref}.'
            ref = InputReference(variable=split_ref[1])
        elif ref_type == 'tasks':
            # BUGFIX: the assert message used to be a ValueError *instance*,
            # which misleadingly reads as a raised exception; use a plain
            # string like the 'input' branch (an AssertionError is raised
            # either way).
            assert len(split_ref) == 3, \
                f'Task Reference should be in format "tasks.task-name.variable" but' \
                f' found: {ref}'
            ref = TaskReference(
                name=split_ref[1], variable=split_ref[2])
        elif ref_type == 'item':
            # Item references may contain dots inside the variable path.
            variable = '.'.join(split_ref[1:])
            ref = ItemReference(variable=variable)
        else:
            raise ValueError(f'Reference of type {ref_type} not recognized: {ref}')
        refs.append(ref)

    return refs
|
4e28d082e7fc638470b2f4753e6283d9630be073
| 3,641,072
|
def arrange_images(normalized_posters, blur_factor, blur_radius):
    """
    Pack poster images into a collage and soften the unused background.

    Arguments:
        normalized_posters: iterable of PIL.Image
            Posters already normalized to comparable areas.
        blur_factor:
            Number of times to blend a blurred copy into the wasted space.
        blur_radius:
            Radius of the Gaussian blur neighbourhood.
    Returns:
        collage: np.array
            A collage of images heuristically packed together.
    """
    # Greedy heuristic: packing the largest posters first tends to waste
    # less area.
    posters = sorted(
        normalized_posters, key=lambda im: im.size[0] * im.size[1], reverse=True
    )
    sizes = [im.size for im in posters]
    positions = rpack.pack(sizes)

    width = max(x + w for (x, _), (w, _) in zip(positions, sizes))
    height = max(y + h for (_, y), (_, h) in zip(positions, sizes))

    # White canvas; `unused` tracks pixels never covered by a poster.
    collage = np.full([height + 1, width + 1, 3], 255, dtype=np.uint8)
    unused = np.full_like(collage, True)

    for (x, y), im in zip(positions, posters):
        w, h = im.size
        collage[y:y + h, x:x + w] = im
        unused[y:y + h, x:x + w] = False

    # Never-covered pixels look harsh; repeatedly blend in a blurred copy
    # of the collage there to diffuse them into a softer background.
    unused = np.where(unused)
    gaussian = ImageFilter.GaussianBlur(radius=blur_radius)
    for _ in range(blur_factor):
        softened = Image.fromarray(collage).filter(gaussian)
        collage[unused] = np.array(softened)[unused]
    return collage
|
e3d4dc90c8ff4ec435061a3507423cd8c5f7c6d4
| 3,641,073
|
def make_text(text, position=(0, 0, 0), height=1):
    """
    Create a white text sprite of the given height at the specified location.
    """
    texture = TextTexture(string=text, color='white', size=100, squareTexture=False)
    material = SpriteMaterial(map=texture)
    return Sprite(material=material, position=position,
                  scaleToTexture=True, scale=[1, height, 1])
|
19daad7ae7f93ce1a4f06596fe2799c8e9701b72
| 3,641,074
|
import copy
def get_features(user_features, documents, ARGS, BOW = False, Conversational = False, User = False, SNAPSHOT_LEN = False, Questions = False, COMMENT_LEN = True):
    """
    Generates Features:
       Type of Features:
         - BOW: bag of words features
         - Conversational: features extracted from the conversation
         - User: features based on participant information
         - SNAPSHOT_LEN: number of comments in the final snapshot
         - Questions: question features
         - COMMENT_LEN: number of comments added to the conversation

    Parameters
    ----------
    user_features : dict
        Per-conversation participant features, keyed by conv_id.
    documents : list of (conversation, clss, conv_id)
        Conversations with their class label and id.
    ARGS : tuple
        (STATUS, ASPECTS, attacker_profile_ASPECTS, LEXICONS, QUESTIONS,
        UNIGRAMS_LIST, BIGRAMS_LIST) -- shared resources for the extractors.
    BOW, Conversational, User, SNAPSHOT_LEN, Questions, COMMENT_LEN : bool
        Toggles selecting which feature families end up in the returned set.

    Returns
    -------
    (user_info, starter_attack_profiles, non_starter_attack_profiles,
     all_profiles, feature_sets)
        feature_sets is a list of (feature_dict, clss) pairs, one per document.
    """
    STATUS, ASPECTS, attacker_profile_ASPECTS, LEXICONS, QUESTIONS, UNIGRAMS_LIST, BIGRAMS_LIST = ARGS
    feature_sets = []
    # BOW features
    bow_features = []
    for pair in documents:
        conversation, clss, conv_id = pair
        feature_set = {}
        # exclude last action (the one being predicted)
        actions = conversation['action_feature']
        end_time = max([a['timestamp_in_sec'] for a in actions])
        actions = [a for a in actions if a['timestamp_in_sec'] < end_time]
        # sort newest-first; ties broken by the dotted id components
        actions = sorted(actions, \
            key=lambda k: (k['timestamp_in_sec'], k['id'].split('.')[1], k['id'].split('.')[2]))[::-1]
        comments_actions = [a for a in actions if a['comment_type'] == 'SECTION_CREATION' or a['comment_type'] == 'COMMENT_ADDING']
        # update feature set
        feature_set.update(_get_term_features(comments_actions, UNIGRAMS_LIST, BIGRAMS_LIST))
        bow_features.append((copy.deepcopy(feature_set), clss))
    # Conversational features
    conv_features = []
    for pair in documents:
        conversation, clss, conv_id = pair
        feature_set = {}
        # exclude last action
        actions = conversation['action_feature']
        end_time = max([a['timestamp_in_sec'] for a in actions])
        actions = [a for a in actions if a['timestamp_in_sec'] < end_time]
        actions = sorted(actions, \
            key=lambda k: (k['timestamp_in_sec'], k['id'].split('.')[1], k['id'].split('.')[2]))[::-1]
        # only keep comment adding and section creation
        comments_actions = [a for a in actions if a['comment_type'] == 'SECTION_CREATION' or a['comment_type'] == 'COMMENT_ADDING']
        # conversational features from all actions that add a comment
        feature_set.update(_get_global_action_features(comments_actions))
        # conversational features from the last N actions that add a comment
        feature_set.update(_get_last_n_action_features(comments_actions, 1, LEXICONS))
        # conversational features from the last comment-adding action of each participant
        feature_set.update(_get_action_features(comments_actions, LEXICONS))
        # conversational features based on a single participant's behavior in the conversation
        feature_set.update(_get_repeatition_features(comments_actions))
        # question features
        if Questions:
            feature_set.update(_get_question_features(conv_id, QUESTIONS))
        # back to chronological order for the reply-relation features
        actions = actions[::-1]
        # conversational features based on reply relations
        feature_set.update(_get_balance_features(actions))
        # number of comments in last snapshot
        if SNAPSHOT_LEN:
            feature_set['snapshot_len'] = conversation['snapshot_len']
        conv_features.append((copy.deepcopy(feature_set), clss))
    # participant features
    # extract the last participant's profile
    participant_features = []
    starter_attack_profiles = {0: [], 1:[]}
    non_starter_attack_profiles = {0: [], 1: []}
    all_profiles = {0: [], 1: []}
    blocks = []
    user_info = []
    for ind, pair in enumerate(documents):
        conversation, clss, conv_id = pair
        # is the starter of the conversation also the last participant in the conversation
        actions = conversation['action_feature']
        start_time = min([a['timestamp_in_sec'] for a in actions])
        end_time = max([a['timestamp_in_sec'] for a in actions])
        for a in actions:
            if a['timestamp_in_sec'] == start_time:
                if 'user_text' in a:
                    starter = a['user_text']
                else:
                    starter = 'anon'
            if a['timestamp_in_sec'] == end_time:
                if 'user_text' in a:
                    ender = a['user_text']
                else:
                    ender = 'anon'
        feature_set, user_infos = _user_features(actions, user_features[conv_id], ASPECTS, STATUS, QUESTIONS[conv_id])
        # last participant's profile
        p, b = attacker_profile(conversation, user_infos, attacker_profile_ASPECTS)
        user_info.append(user_infos)
        if starter == ender:
            starter_attack_profiles[clss].append(p)
        else:
            non_starter_attack_profiles[clss].append(p)
        all_profiles[clss].append(p)
        # participants' block histories
        blocks.append(int(b))
        # update participant features
        participant_features.append((copy.deepcopy(feature_set), clss))
    feature_sets = []
    # assemble the returned feature set from the families selected by the flags
    for ind, pair in enumerate(documents):
        conversation, clss, conv_id = pair
        actions = conversation['action_feature']
        end_time = max([a['timestamp_in_sec'] for a in actions])
        actions = [a for a in actions if a['timestamp_in_sec'] < end_time]
        comments_actions = [a for a in actions if a['comment_type'] == 'SECTION_CREATION' or a['comment_type'] == 'COMMENT_ADDING']
        feature_set = {}
        if COMMENT_LEN:
            feature_set = {'no_comments': len(comments_actions)}
        if BOW:
            feature_set.update(bow_features[ind][0])
        if Conversational:
            feature_set.update(conv_features[ind][0])
        if User:
            feature_set.update(participant_features[ind][0])
        feature_sets.append((feature_set, clss))
    return user_info, starter_attack_profiles, non_starter_attack_profiles, all_profiles, feature_sets
|
b200c9783661db13bfecb76eee18f39bc67301b6
| 3,641,075
|
import time
import traceback
def incomeStat(headers):
    """
    Fetch the earnings statistics ("收益统计") for the current user.

    :param headers: request headers; ``headers["Referer"]`` must carry the
        query string used to authenticate the balance request.
    :return: the parsed JSON response when ``status == 0``, otherwise ``None``.
    """
    time.sleep(0.3)
    url = f'https://kd.youth.cn/wap/user/balance?{headers["Referer"].split("?")[1]}'
    try:
        response = requests_session().get(url=url, headers=headers, timeout=50).json()
        print('收益统计')
        print(response)
        if response['status'] == 0:
            return response
        return None
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Keep the best-effort behavior (log and return
        # None) but only for ordinary exceptions.
        print(traceback.format_exc())
        return None
|
8c023314835ab46b354ae0d793d6de3694711a65
| 3,641,076
|
def t_matrix(phi, theta, psi, sequence):
    """Return the matrix mapping Euler-angle rates to angular velocity.

    Only the 'ZYX' sequence is implemented; any other sequence returns
    the identity matrix.
    """
    if sequence != 'ZYX':
        return np.eye(3)
    sin_phi, cos_phi = np.sin(phi), np.cos(phi)
    tan_theta, cos_theta = np.tan(theta), np.cos(theta)
    return np.array([
        [1, sin_phi * tan_theta, cos_phi * tan_theta],
        [0, cos_phi, -sin_phi],
        [0, sin_phi / cos_theta, cos_phi / cos_theta],
    ])
|
65c5595cc286a442777651f888a6e3fee032ba21
| 3,641,077
|
def point_in_fence(x, y, points):
    """
    Determine whether a point lies inside a polygonal fence.

    :param x: longitude of the point
    :param y: latitude of the point
    :param points: polygon vertices, format [[lon1, lat1], [lon2, lat2], ...]
    :return: True if the point is inside the fence, otherwise False
    """
    count = 0
    x1, y1 = points[0]
    # Which side of the horizontal line through (x, y) the current vertex is on.
    x1_part = (y1 > y) or ((x1 - x > 0) and (y1 == y))
    # BUGFIX: iterate the closing edge explicitly instead of appending the
    # first vertex to `points`, which used to mutate the caller's list on
    # every call.
    for x2, y2 in points[1:] + [points[0]]:
        x2_part = (y2 > y) or ((x2 > x) and (y2 == y))
        if x2_part == x1_part:
            # Edge does not cross the horizontal line; just advance.
            x1, y1 = x2, y2
            continue
        # Sign of the cross product gives the crossing direction.
        mul = (x1 - x) * (y2 - y) - (x2 - x) * (y1 - y)
        if mul > 0:  # counter-clockwise crossing
            count += 1
        elif mul < 0:
            count -= 1
        x1, y1 = x2, y2
        x1_part = x2_part
    # Two signed crossings in the same direction == one full winding.
    if count == 2 or count == -2:
        return True
    else:
        return False
|
bb25f399eadf818fbafdeee6c8adbb1254a579f7
| 3,641,078
|
def parse_prior(composition, alphabet, weight=None):
    """Parse a description of the expected monomer distribution of a sequence.
    Valid compositions:
    * None or 'none'
        No composition sepecified
    * 'auto' or 'automatic'
        Use the typical average distribution
        for proteins and an equiprobable distribution for
        everything else.
    * 'equiprobable'
        All monomers have the same probability.
    * a percentage, e.g. '45%' or a fraction '0.45'
        The fraction of CG bases for nucleotide alphabets
    * a species name, e.g. 'E. coli', 'H. sapiens',
        Use the average CG percentage for the species's genome.
    * An explicit distribution
        e.g. {'A':10, 'C':40, 'G':40, 'T':10}

    The returned prior is normalized to sum to 1 and scaled by `weight`
    (default: sqrt(len(alphabet))).  Returns None when no composition is
    specified; raises ValueError on malformed input.
    """
    if composition is None:
        return None
    comp = composition.strip()
    if comp.lower() == "none":
        return None
    # Default pseudocount weight scales with alphabet size.
    # NOTE(review): if both weight and alphabet are None, the `weight < 0`
    # comparison below raises TypeError -- confirm callers always pass an
    # alphabet or an explicit weight.
    if weight is None and alphabet is not None:
        weight = sqrt(float(len(alphabet)))
    if weight < 0:
        raise ValueError("Weight cannot be negative.")
    if comp.lower() == "equiprobable":
        prior = weight * equiprobable_distribution(len(alphabet))
    elif comp.lower() == "auto" or comp.lower() == "automatic":
        # Protein alphabets get the typical amino-acid composition;
        # everything else falls back to equiprobable.
        if alphabet == unambiguous_protein_alphabet:
            prior = weight * asarray(aa_composition, float64)
        else:
            prior = weight * equiprobable_distribution(len(alphabet))
    elif comp in std_percentCG:
        # Known species name -> genome-average %CG.
        prior = weight * base_distribution(std_percentCG[comp])
    elif comp[-1] == "%":
        prior = weight * base_distribution(float(comp[:-1]))
    elif isfloat(comp):
        # A bare fraction such as '0.45' is interpreted as %CG / 100.
        prior = weight * base_distribution(float(comp) * 100.0)
    elif composition[0] == "{" and composition[-1] == "}":
        # Explicit dict-like distribution; tokenized by stripping the
        # punctuation and splitting on whitespace.
        explicit = composition[1:-1]
        explicit = (
            explicit.replace(",", " ")
            .replace("'", " ")
            .replace('"', " ")
            .replace(":", " ")
            .split()
        )
        if len(explicit) != len(alphabet) * 2:
            raise ValueError("Explicit prior does not match length of alphabet")
        # -1.0 marks letters not yet assigned a value.
        prior = -ones(len(alphabet), float64)
        try:
            for r in range(len(explicit) // 2):
                letter = explicit[r * 2]
                index = alphabet.ord(letter)
                value = float(explicit[r * 2 + 1])
                prior[index] = value
        except ValueError:
            raise ValueError("Cannot parse explicit composition")
        if any(prior == -1.0):
            raise ValueError(
                "Explicit prior does not match alphabet"
            )  # pragma: no cover
        # Normalize to a probability distribution, then scale by weight.
        prior /= sum(prior)
        prior *= weight
    else:
        raise ValueError("Unknown or malformed composition: %s" % composition)
    if len(prior) != len(alphabet):
        raise ValueError(
            "The sequence alphabet and composition are incompatible."
        )  # pragma: no cover
    return prior
|
50795a21231138bb3576a50a3791d9136264754e
| 3,641,079
|
def get_queues(prefix=None):
    """
    List the available SQS queues, optionally restricted by name prefix.

    :param prefix: When given, only queues whose names start with this
                   prefix are returned.
    :return: A list of Queue objects.
    """
    queue_iter = sqs.queues.filter(QueueNamePrefix=prefix) if prefix else sqs.queues.all()
    queues = list(queue_iter)
    if not queues:
        logger.warning("No queues found.")
    else:
        logger.info("Got queues: %s", ', '.join([q.url for q in queues]))
    return queues
|
c459cad66561d887abdcc40157fea09481e267c7
| 3,641,080
|
def _genNodesNormal(numNodes=None, center=None, standardDeviation=None):
    """
    Generate randomized nodes using a Normal distribution around a center point
    Parameters
    ----------
    numNodes: int
        Required, number of nodes to be generated
    center: list or tuple
        Required, coordinates of the center point
        (NOTE(review): exact format is whatever geoPointInDistance2D
        expects -- confirm against that helper)
    standardDeviation: float
        Required, standard deviation of the normal distribution of
        distances from the center
    Returns
    -------
    list of lists
        A list of coordinates normally distributed around the center
    """
    # Initialize
    locs = []
    # Randomized generate nodes in normal distribution
    for i in range(numNodes):
        # Uniform random bearing (degrees) and normally-distributed distance
        rndUniform = np.random.uniform(0, 360)
        rndNormal = np.random.normal(0, standardDeviation)
        newLoc = geoPointInDistance2D(center, rndUniform, rndNormal)
        locs.append(newLoc)
    return locs
|
62d7f44056621786a1b796bfe85df4eac0ec9574
| 3,641,081
|
def util_test_normalize(mean, std, op_type):
    """
    Utility function for testing Normalize. Input arguments are given by other tests
    """
    if op_type == "cpp":
        # C++ ops: decode then normalize via two separate map calls
        dataset = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
        dataset = dataset.map(input_columns=["image"], operations=c_vision.Decode())
        dataset = dataset.map(input_columns=["image"], operations=c_vision.Normalize(mean, std))
        return dataset
    if op_type == "python":
        # Python ops: compose decode / to-tensor / normalize into one transform
        transform = py_vision.ComposeOp([
            py_vision.Decode(),
            py_vision.ToTensor(),
            py_vision.Normalize(mean, std),
        ])
        dataset = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
        return dataset.map(input_columns=["image"], operations=transform())
    raise ValueError("Wrong parameter value")
|
43fe33a124a8d52252738697eccd4775edb6e4b8
| 3,641,082
|
def calc_sub_from_constant(func, in_data, **kwargs):
    """[SubFromConstant](https://docs.chainer.org/en/v4.3.0/reference/generated/chainer.functions.add.html)

    Delegates to the shared constant-arithmetic estimator `_calc`;
    see the documentation for [AddConstant](#addconstant).
    """
    return _calc(func, in_data, **kwargs)
|
d40fcf668c2dc8e0c7d889d2cf145de208cc0ae6
| 3,641,083
|
import torch
def mask_distance_matrix(dmat, weight_bins=weight_bins):
    """Build the per-bin weight masks and the sequence-separation mask.

    A larger weight is assigned to residue pairs forming a contact
    (e.g. 20.5 / 5.4 / 1 for distances 0-8 / 8-15 / >15) for pairs
    (i, j) with |i - j| >= 24; exact numbers come from `weight_bins`.

    :param dmat: distance matrix tensor of shape (batch, m, n)
    :param weight_bins: distance-bin edges used to quantize `dmat`
    :return: (masks, t_imj) where `masks` are the quantized per-bin masks
        and `t_imj` is a float tensor (batch, m, n) with 1.0 where
        |i - j| >= 24 and 0.0 elsewhere.
    """
    b, m, n = dmat.size()
    # Sequence-separation mask built with broadcasting instead of the
    # previous O(b*m*n) Python triple loop.
    rows = torch.arange(m, device=device).unsqueeze(1)
    cols = torch.arange(n, device=device).unsqueeze(0)
    sep = ((rows - cols).abs() >= 24).to(torch.float)
    t_imj = sep.unsqueeze(0).repeat(b, 1, 1)
    masks = quantize_distance_matrix(dmat, weight_bins, False)
    return masks, t_imj
|
4d8ca64834b6de9e4dd0077278cb4a687f7cf33e
| 3,641,084
|
def get_output():
    """Return the set output setting."""
    # Simple read-only accessor for the module-level `_output` variable,
    # presumably assigned by a matching setter elsewhere in this module.
    return _output
|
4332661d333d4ca2c364761b35bb2d9ed0b9d302
| 3,641,085
|
def cash_grouped_nb(target_shape, cash_flow_grouped, group_lens, init_cash_grouped):
    """Get cash series per group.

    Starts each group at its initial cash and accumulates the group's
    cash flow row by row.
    """
    check_group_lens(group_lens, target_shape[1])
    out = np.empty_like(cash_flow_grouped)

    col_start = 0
    for g in range(len(group_lens)):
        col_end = col_start + group_lens[g]
        running_cash = init_cash_grouped[g]
        # Running cumulative sum of the group's cash flow.
        for row in range(cash_flow_grouped.shape[0]):
            running_cash = add_nb(running_cash, cash_flow_grouped[row, g])
            out[row, g] = running_cash
        col_start = col_end
    return out
|
3a7978493503cbae4fc867d8ca864193d913f33f
| 3,641,086
|
def agreement():
    """Render the license agreement step and persist the user's acceptance."""
    form = LicenseForm()
    if form.validate_on_submit():
        accepted = "Y" if form.accept_gluu_license.data else "N"
        gluu_settings.db.set("ACCEPT_GLUU_LICENSE", accepted)
        return redirect(url_for(wizard_steps.next_step()))

    with open("./LICENSE", "r") as f:
        agreement_file = f.read()

    if request.method == "GET":
        # Pre-populate the checkbox from the stored setting.
        form.accept_gluu_license.data = gluu_settings.db.get("ACCEPT_GLUU_LICENSE")

    wizard_steps.current_step = 'license'
    return render_template("wizard/index.html",
                           license=agreement_file,
                           form=form,
                           current_step=wizard_steps.step_number(),
                           template="license")
|
e213d9ce33014b03fd97fdd3991eb72c52e3e9e7
| 3,641,087
|
import torch
def rotmat2quat(mat: torch.Tensor) -> torch.Tensor:
    """Converts rotation matrix to quaternion.

    This uses the algorithm found on
    https://en.wikipedia.org/wiki/Rotation_matrix#Quaternion
    , and follows the code from ceres-solver
    https://github.com/ceres-solver/ceres-solver/blob/master/include/ceres/rotation.h

    Args:
        mat: tensor of shape (..., 3, 3) containing rotation matrices.

    Returns:
        Tensor of shape (..., 4) containing quaternions in (w, x, y, z) order.
    """
    mat_shape = mat.shape
    assert mat_shape[-2:] == (3, 3)
    # Flatten all leading batch dimensions into one.
    mat = torch.reshape(mat, [-1, 3, 3])

    # Case A: generic case, numerically stable when the trace is not near -1
    # (i.e. the rotation angle is not near 180 degrees).
    r = torch.sqrt(torch.clamp_min(1. + mat[:, 0, 0] + mat[:, 1, 1] + mat[:, 2, 2], 0.0))
    s = 0.5 / r
    quat = torch.stack([
        0.5 * r,
        (mat[:, 2, 1] - mat[:, 1, 2]) * s,
        (mat[:, 0, 2] - mat[:, 2, 0]) * s,
        (mat[:, 1, 0] - mat[:, 0, 1]) * s
    ], dim=-1)

    # r ~ 0 means trace ~ -1, i.e. a rotation of ~180 degrees, where the
    # division by r above is unstable.  NOTE(review): `isclose` here is a
    # module-level helper, not torch.isclose -- confirm its tolerance.
    near_pi = isclose(r, 0.0)
    if torch.sum(near_pi) > 0:
        # Case B0, B1, B2: ~180deg rotation.  Compute a candidate quaternion
        # for each choice of dominant diagonal element, then select per-row
        # the candidate keyed by the largest diagonal entry (most stable).
        quats1 = mat.new_zeros([mat.shape[0], 3, 4])
        case_idx = torch.argmax(torch.diagonal(mat, dim1=-1, dim2=-2), dim=-1)
        for case, (i, j, k) in enumerate([[0, 1, 2], [1, 2, 0], [2, 0, 1]]):
            r = torch.sqrt(mat[..., i, i] - mat[..., j, j] - mat[..., k, k] + 1.0)
            s = 0.5 / r
            quats1[:, case, 0] = (mat[:, k, j] - mat[:, j, k]) * s
            quats1[:, case, i + 1] = 0.5 * r
            quats1[:, case, j + 1] = (mat[:, i, j] + mat[:, j, i]) * s
            quats1[:, case, k + 1] = (mat[:, k, i] + mat[:, i, k]) * s
        quat1 = quats1[torch.arange(mat.shape[0]), case_idx, :]
        # Only overwrite the rows where Case A was unstable.
        quat[near_pi] = quat1[near_pi]

    # Restore the original batch shape.
    quat = torch.reshape(quat, [*mat_shape[:-2], 4])
    return quat
|
470d2890eb5c07dff1fdc3de7d347fc86dd3fd1e
| 3,641,088
|
def equalize(image):
    """
    Equalize the image histogram. This function applies a non-linear
    mapping to the input image, in order to create a uniform
    distribution of grayscale values in the output image.

    Args:
        image (PIL image): Image to be equalized

    Returns:
        image (PIL image), Equalized image.
    """
    # Thin wrapper around PIL's histogram equalization.
    return ImageOps.equalize(image)
|
5283609b316452da5aa9969e999dcdeb4de26b2b
| 3,641,089
|
def validate_output(value):
    """Validate "output" parameter.

    A comma-separated string is split into a list; empty names are
    dropped.  ``None`` is passed through unchanged.
    """
    if value is None:
        return None
    if isinstance(value, str):
        value = value.split(",")
    # drop empty names
    return [name for name in value if name]
|
f00773674868ebde741f64b47fdc3372ad6a1e7d
| 3,641,090
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.