content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""Methods for handling probability distributions."""
import numpy
import pandas
import scipy.stats
from gewittergefahr.gg_utils import error_checking
FEATURE_NAMES_KEY = 'feature_names'
FEATURE_MEANS_KEY = 'feature_means'
COVARIANCE_MATRIX_KEY = 'covariance_matrix'
COVAR_MATRIX_INVERSE_KEY = 'covar_matrix_inverse'
COVAR_MATRIX_DETERMINANT_KEY = 'covar_matrix_determinant'
PRIOR_CLASS_PROBABILITY_KEY = 'prior_class_probability'
ORIG_FEATURE_TABLE_KEY = 'orig_feature_table'
MIN_CUMUL_DENSITY_FOR_NORMAL_DIST = 1e-6
MAX_CUMUL_DENSITY_FOR_NORMAL_DIST = 1. - 1e-6
def _transform_each_marginal_to_uniform(
new_feature_table, orig_feature_table=None):
"""Transforms marginal distribution of each feature to uniform distribution.
This method transforms data in `new_feature_table` only.
If `orig_feature_table` is None, the transformation for feature "x" in the
[i]th example will be based on the percentile score of
new_feature_table["x"].values[i] in new_feature_table["x"].values.
If `orig_feature_table` is specified, the transformation for feature "x" in
the [i]th example will be based on the percentile score of
new_feature_table["x"].values[i] in orig_feature_table["x"].values.
P = number of original examples
Q = number of new examples
M = number of features
:param new_feature_table: pandas DataFrame with Q rows and M columns.
Column names are feature names.
:param orig_feature_table: pandas DataFrame with P rows and M columns.
Column names are feature names.
:return: transformed_new_feature_table: Same as input, except that the
marginal distribution of each column is uniform.
"""
# TODO(thunderhoser): I could probably make this faster for cases where
# `orig_feature_table` is specified.
feature_names = list(new_feature_table)
new_feature_matrix = new_feature_table[feature_names].to_numpy()
if orig_feature_table is not None:
error_checking.assert_columns_in_dataframe(
orig_feature_table, feature_names)
orig_feature_matrix = orig_feature_table[feature_names].to_numpy()
num_features = len(feature_names)
num_new_examples = new_feature_matrix.shape[0]
transformed_new_feature_table = None
for j in range(num_features):
new_indices_to_use = numpy.where(
numpy.invert(numpy.isnan(new_feature_matrix[:, j])))[0]
transformed_values = numpy.full(num_new_examples, 0.5)
if orig_feature_table is None:
these_ranks = scipy.stats.rankdata(
new_feature_matrix[new_indices_to_use, j], method='average')
transformed_values[new_indices_to_use] = (
these_ranks / len(new_indices_to_use))
else:
orig_indices_to_use = numpy.where(
numpy.invert(numpy.isnan(orig_feature_matrix[:, j])))[0]
for i in new_indices_to_use:
transformed_values[i] = scipy.stats.percentileofscore(
orig_feature_matrix[orig_indices_to_use, j],
new_feature_matrix[i, j], kind='weak') / 100
if transformed_new_feature_table is None:
transformed_new_feature_table = pandas.DataFrame.from_dict(
{feature_names[j]: transformed_values})
else:
transformed_new_feature_table = (
transformed_new_feature_table.assign(
**{feature_names[j]: transformed_values}))
return transformed_new_feature_table
def _transform_each_marginal_to_normal(
        new_feature_table, orig_feature_table=None):
    """Transforms marginal distribution of each feature to normal distribution.

    To learn about the roles of `new_feature_table` and `orig_feature_table`,
    see documentation for _transform_each_marginal_to_uniform.

    :param new_feature_table: See doc for _transform_each_marginal_to_uniform.
    :param orig_feature_table: See doc for _transform_each_marginal_to_uniform.
    :return: transformed_new_feature_table: Same as input, except that the
        marginal distribution of each column is normal.
    """

    transformed_new_feature_table = _transform_each_marginal_to_uniform(
        new_feature_table, orig_feature_table)

    for this_column in list(transformed_new_feature_table):
        # Clip cumulative densities away from 0 and 1 so the inverse CDF below
        # stays finite.  `numpy.clip` returns a new array; the original
        # mutated the DataFrame's backing array in place via boolean-mask
        # assignment before overwriting the column anyway.
        these_values = numpy.clip(
            transformed_new_feature_table[this_column].values,
            MIN_CUMUL_DENSITY_FOR_NORMAL_DIST,
            MAX_CUMUL_DENSITY_FOR_NORMAL_DIST)

        # Inverse CDF of the standard normal distribution.
        these_values = scipy.stats.norm.ppf(these_values, loc=0, scale=1)
        transformed_new_feature_table = transformed_new_feature_table.assign(
            **{this_column: these_values})

    return transformed_new_feature_table
def _normalize_class_probabilities(class_probability_matrix):
    """Normalizes class probabilities, so that probs for each example sum to 1.

    N = number of examples
    K = number of classes

    :param class_probability_matrix: N-by-K numpy array of class probabilities.
    :return: class_probability_matrix: Same as input, except that each row sums
        to 1.
    """

    error_checking.assert_is_geq_numpy_array(
        class_probability_matrix, 0., allow_nan=True)

    # Any row containing at least one NaN becomes all-NaN.
    nan_row_flags = numpy.any(numpy.isnan(class_probability_matrix), axis=1)
    class_probability_matrix[numpy.where(nan_row_flags)[0], :] = numpy.nan

    # Linearly rescale the whole matrix into [0, 1], ignoring NaN entries.
    matrix_min = numpy.nanmin(class_probability_matrix)
    matrix_max = numpy.nanmax(class_probability_matrix)
    class_probability_matrix = (
        (class_probability_matrix - matrix_min) /
        (matrix_max - matrix_min))

    # Normalize each fully-valid row so that it sums to 1.
    for row_index in numpy.where(numpy.invert(nan_row_flags))[0]:
        row_sum = numpy.sum(class_probability_matrix[row_index, :])
        class_probability_matrix[row_index, :] = (
            class_probability_matrix[row_index, :] / row_sum)

    return class_probability_matrix
def _get_feature_means(feature_matrix):
"""Computes mean of each feature.
N = number of examples
M = number of features (input variables)
:param feature_matrix: N-by-M numpy array. feature_matrix[i, j] is the
value of the [j]th feature for the [i]th example.
:return: feature_means: length-M numpy array with mean value of each
feature.
"""
return numpy.nanmean(feature_matrix, axis=0)
def _get_covariance_matrix(feature_matrix, assume_diagonal=False):
"""Computes covariance matrix.
N = number of examples
M = number of features (input variables)
:param feature_matrix: N-by-M numpy array. feature_matrix[i, j] is the
value of the [j]th feature for the [i]th example.
:param assume_diagonal: Boolean flag. If True, assumes diagonal covariance
matrix.
:return: covariance_matrix: M-by-M numpy array. Entry [i, j] is the
covariance between the [i]th and [j]th features.
:return: feature_means: length-M numpy array with mean value of each
feature.
"""
num_features = feature_matrix.shape[1]
feature_means = _get_feature_means(feature_matrix)
if assume_diagonal:
covariance_matrix = numpy.diag(
numpy.nanvar(feature_matrix, axis=0, ddof=1))
else:
covariance_matrix = numpy.full((num_features, num_features), numpy.nan)
for j in range(num_features):
for k in range(j + 1):
nan_flags = numpy.logical_or(
numpy.isnan(feature_matrix[:, j]),
numpy.isnan(feature_matrix[:, k]))
not_nan_indices = numpy.where(numpy.invert(nan_flags))[0]
covariance_matrix[j, k] = numpy.sum(
(feature_matrix[not_nan_indices, j] - feature_means[j]) *
(feature_matrix[not_nan_indices, k] - feature_means[k]))
covariance_matrix[j, k] = (
(1. / (len(not_nan_indices) - 1)) * covariance_matrix[j, k])
covariance_matrix[k, j] = covariance_matrix[j, k]
return covariance_matrix, feature_means
def fit_multivariate_normal(feature_table, assume_diagonal_covar_matrix=False):
    """Fits data to a multivariate normal distribution.

    N = number of examples
    M = number of features (input variables)

    :param feature_table: pandas DataFrame with N rows and M columns.  Column
        names are feature names.
    :param assume_diagonal_covar_matrix: Boolean flag.  If True, will assume a
        diagonal covariance matrix (where all off-diagonal entries are zero).
        This eliminates the risk of a singular (non-invertible) covariance
        matrix.
    :return: multivariate_normal_dict: Dictionary with the following keys.
    multivariate_normal_dict['feature_names']: length-M list of feature names.
    multivariate_normal_dict['feature_means']: length-M numpy array with mean
        value of each feature.
    multivariate_normal_dict['covariance_matrix']: M-by-M numpy array.  Entry
        [i, j] is the covariance between the [i]th and [j]th features.
    multivariate_normal_dict['covar_matrix_inverse']: Inverse of covariance
        matrix.
    multivariate_normal_dict['covar_matrix_determinant']: Determinant of
        covariance matrix.
    :raises: ValueError: if any column of feature_matrix has < 2 real values
        (not NaN).
    """

    error_checking.assert_is_boolean(assume_diagonal_covar_matrix)

    # Each feature needs >= 2 real values, or its variance is undefined.
    real_value_counts = numpy.sum(
        numpy.invert(numpy.isnan(feature_table.to_numpy())), axis=0)
    if numpy.any(real_value_counts < 2):
        raise ValueError('Each column of feature_table must have >= 2 real '
                         'values (not NaN).')

    # Fit the Gaussian to the marginal-normalized version of the data.
    normalized_matrix = _transform_each_marginal_to_normal(
        feature_table).to_numpy()
    covariance_matrix, feature_means = _get_covariance_matrix(
        normalized_matrix, assume_diagonal=assume_diagonal_covar_matrix)

    multivariate_normal_dict = {
        FEATURE_NAMES_KEY: list(feature_table),
        FEATURE_MEANS_KEY: feature_means,
        COVARIANCE_MATRIX_KEY: covariance_matrix,
        COVAR_MATRIX_INVERSE_KEY: numpy.linalg.inv(covariance_matrix),
        COVAR_MATRIX_DETERMINANT_KEY: numpy.linalg.det(covariance_matrix)
    }
    return multivariate_normal_dict
def fit_mvn_for_each_class(feature_table, class_labels, num_classes,
                           assume_diagonal_covar_matrix=False):
    """For each class, fits data to a multivariate normal distribution.

    N = number of examples
    M = number of features (input variables)
    K = number of classes

    :param feature_table: pandas DataFrame with N rows and M columns.  Column
        names are feature names.
    :param class_labels: length-N numpy array of class labels.  Should be
        integers ranging from 0...[num_classes - 1].
    :param num_classes: Number of classes.
    :param assume_diagonal_covar_matrix: See documentation for
        fit_multivariate_normal.
    :return: list_of_mvn_dictionaries: length-K list of dictionaries, each with
        the following keys.
    list_of_mvn_dictionaries[k]['prior_class_probability']: Prior probability
        of [k]th class (frequency of label k in `class_labels`).
    list_of_mvn_dictionaries[k]['orig_feature_table']: Original feature table
        (before transforming marginals to normal distribution) for [k]th class.
    list_of_mvn_dictionaries[k]['feature_names']: length-M list of feature
        names (same for each class).
    list_of_mvn_dictionaries[k]['feature_means']: length-M numpy array with
        mean value of each feature, given the [k]th class.
    list_of_mvn_dictionaries[k]['covariance_matrix']: M-by-M numpy array.
        Covariance matrix, given the [k]th class.
    list_of_mvn_dictionaries[k]['covar_matrix_inverse']: Inverse of covariance
        matrix for [k]th class.
    list_of_mvn_dictionaries[k]['covar_matrix_determinant']: Determinant of
        covariance matrix for [k]th class.
    :raises: ValueError: if any class is not represented in `class_labels`.
    """

    num_examples = len(feature_table.index)

    error_checking.assert_is_integer(num_classes)
    error_checking.assert_is_geq(num_classes, 2)
    error_checking.assert_is_integer_numpy_array(class_labels)
    error_checking.assert_is_numpy_array(
        class_labels, exact_dimensions=numpy.array([num_examples]))
    error_checking.assert_is_geq_numpy_array(class_labels, 0)
    error_checking.assert_is_less_than_numpy_array(class_labels, num_classes)

    list_of_mvn_dictionaries = []

    for k in range(num_classes):
        in_class_flags = class_labels == k
        if not numpy.any(in_class_flags):
            error_string = ('Class {0:d} (label {1:d}) does not exist in the '
                            'input data.').format(k + 1, k)
            raise ValueError(error_string)

        in_class_indices = numpy.where(in_class_flags)[0]
        this_feature_table = feature_table.iloc[in_class_indices]

        this_dict = fit_multivariate_normal(
            this_feature_table,
            assume_diagonal_covar_matrix=assume_diagonal_covar_matrix)

        # Prior probability = class frequency in the training labels.
        this_dict.update({
            PRIOR_CLASS_PROBABILITY_KEY:
                float(len(in_class_indices)) / num_examples,
            ORIG_FEATURE_TABLE_KEY: this_feature_table
        })
        list_of_mvn_dictionaries.append(this_dict)

    return list_of_mvn_dictionaries
def apply_mvn_for_each_class(feature_table, list_of_mvn_dictionaries):
    """Uses multivariate normal distributions to predict class probabilities.

    In other words, this method applies previously fit MVN distributions to
    predict new examples.  Thus, `feature_table` may not be the same feature
    table used to fit the distributions (i.e., used to create
    `list_of_mvn_dictionaries`).

    N = number of examples
    M = number of features (input variables)
    K = number of classes

    :param feature_table: pandas DataFrame with N rows and M columns.  Column
        names are feature names.
    :param list_of_mvn_dictionaries: length-K list of dictionaries created by
        fit_mvn_for_each_class.
    :return: forecast_prob_matrix: N-by-K numpy array of forecast
        probabilities.  Entry [i, k] is the forecast probability that the
        [i]th example comes from the [k]th class.
    :raises: ValueError: if list_of_mvn_dictionaries[k]["feature_names"] does
        not match columns of feature_table for any class k.
    """

    num_classes = len(list_of_mvn_dictionaries)
    feature_names = list(feature_table)
    num_features = len(feature_names)

    for k in range(num_classes):
        features_match = (
            len(list_of_mvn_dictionaries[k][FEATURE_NAMES_KEY]) == num_features
            and list_of_mvn_dictionaries[k][FEATURE_NAMES_KEY] == feature_names)
        if not features_match:
            error_string = (
                str(feature_names) + '\n\nFeature names in feature_table ' +
                '(shown above) and the MVN dictionary for class ' + str(k + 1) +
                ' (shown below) do not match.\n\n' +
                str(list_of_mvn_dictionaries[k][FEATURE_NAMES_KEY]))
            raise ValueError(error_string)

    num_examples = len(feature_table.index)
    forecast_prob_matrix = numpy.full((num_examples, num_classes), numpy.nan)

    for k in range(num_classes):
        # Normalize marginals with respect to the [k]th class's training data.
        transformed_feature_table = _transform_each_marginal_to_normal(
            feature_table, orig_feature_table=
            list_of_mvn_dictionaries[k][ORIG_FEATURE_TABLE_KEY])
        transformed_feature_matrix = transformed_feature_table.to_numpy()

        for i in range(num_examples):
            # Quadratic form (x - mu)^T Sigma^-1 (x - mu).
            this_deviation_vector = (
                transformed_feature_matrix[i, :] -
                list_of_mvn_dictionaries[k][FEATURE_MEANS_KEY])
            this_deviation_vector = numpy.reshape(
                this_deviation_vector, (num_features, 1))

            this_matrix_product = numpy.dot(
                list_of_mvn_dictionaries[k][COVAR_MATRIX_INVERSE_KEY],
                this_deviation_vector)
            this_matrix_product = numpy.dot(
                numpy.transpose(this_deviation_vector), this_matrix_product)[
                    0, 0]

            # Log-density up to the constant -(M/2) log(2 pi).
            forecast_prob_matrix[i, k] = -0.5 * (
                numpy.log(
                    list_of_mvn_dictionaries[k][COVAR_MATRIX_DETERMINANT_KEY]) +
                this_matrix_product)

    # Bug fix: the Gaussian normalization constant is (2 pi)^(-M/2), where M
    # is the number of *features* (dimensionality), not the number of classes.
    # The constant is identical for every matrix entry and probabilities are
    # rescaled in _normalize_class_probabilities, so the returned values are
    # unchanged -- but the intermediate density is now correct.
    forecast_prob_matrix = numpy.exp(
        forecast_prob_matrix - 0.5 * num_features * numpy.log(2 * numpy.pi))

    # Weight each class's likelihood by its prior probability (Bayes' rule,
    # up to normalization).
    for k in range(num_classes):
        forecast_prob_matrix[:, k] = (
            forecast_prob_matrix[:, k] *
            list_of_mvn_dictionaries[k][PRIOR_CLASS_PROBABILITY_KEY])

    return _normalize_class_probabilities(forecast_prob_matrix)
class MultivariateNormalDist(object):
    """Class for multivariate normal distributions.

    Has `fit` and `predict` methods, just like scikit-learn models.
    """

    DEFAULT_NUM_CLASSES = 2

    def __init__(self, num_classes=DEFAULT_NUM_CLASSES,
                 assume_diagonal_covar_matrix=False):
        """Constructor.

        :param num_classes: Number of classes for target variable.  See
            documentation for fit_mvn_for_each_class.
        :param assume_diagonal_covar_matrix: See documentation for
            fit_multivariate_normal.
        """
        self.num_classes = num_classes
        self.assume_diagonal_covar_matrix = assume_diagonal_covar_matrix
        self.list_of_mvn_dictionaries = None  # populated by `fit`

    def fit(self, feature_table, class_labels):
        """Fits data to a multivariate normal distribution for each class.

        :param feature_table: See documentation for fit_mvn_for_each_class.
        :param class_labels: See documentation for fit_mvn_for_each_class.
        """
        self.list_of_mvn_dictionaries = fit_mvn_for_each_class(
            feature_table, class_labels, num_classes=self.num_classes,
            assume_diagonal_covar_matrix=self.assume_diagonal_covar_matrix)

    def predict(self, feature_table):
        """Uses multivariate normal distributions to predict class probs.

        :param feature_table: See documentation for apply_mvn_for_each_class.
        :return: forecast_prob_matrix: See documentation for
            apply_mvn_for_each_class.
        """
        return apply_mvn_for_each_class(
            feature_table, self.list_of_mvn_dictionaries)
| [
37811,
46202,
329,
9041,
12867,
24570,
526,
15931,
198,
198,
11748,
299,
32152,
198,
11748,
19798,
292,
198,
11748,
629,
541,
88,
13,
34242,
198,
6738,
308,
413,
1967,
469,
69,
993,
81,
13,
1130,
62,
26791,
1330,
4049,
62,
41004,
198,... | 2.370356 | 7,833 |
import gzip
from logging.handlers import TimedRotatingFileHandler
import os
import shutil
| [
11748,
308,
13344,
198,
6738,
18931,
13,
4993,
8116,
1330,
5045,
276,
24864,
803,
8979,
25060,
198,
11748,
28686,
198,
11748,
4423,
346,
628,
198
] | 3.68 | 25 |
import json
import os
import re
import subprocess
import sys
from time import sleep
import requests
SIGNATURE = (
"This formatting comment was generated automatically by a script in"
" [uc-cdis/wool](https://github.com/uc-cdis/wool)."
)
ASK_FOR_FORMAT_COMMIT = ["wool", "black", "please format my code"]
def main():
    """
    Entry point: dispatch on the GitHub event that triggered this run.

    Pull-request commit events get a comment on the PR listing the needed
    formatting changes; PR comment events ("issue_comment") may instead
    trigger a commit that applies the formatting.
    """
    event_name = os.environ.get("GITHUB_EVENT_NAME")
    print("GITHUB_EVENT_NAME:", event_name)
    if event_name != "issue_comment":
        # if not running in a GH workflow or if the workflow was not
        # triggered by a comment, just comment on the PR
        comment_on_pr()
    else:
        commit_on_pr()
def find_old_comment(comments_info):
    """
    Return the previously posted wool comment, or None if there is none.

    comments_info should be the JSON response from:
        /{org}/{repo}/issues/{PR}/comments
    """
    # Wool's own comments are identified by the SIGNATURE line in their body.
    matching = (c for c in comments_info if SIGNATURE in c["body"])
    return next(matching, None)
def run_black(github, diff_only):
    """
    Run the `black` formatter over every Python file in a pull request.

    Args:
        github (GitHubInfo)
        diff_only (bool): whether to return formatting diff or formatted files
    Returns:
        dict: file name to formatted contents (or to contents that should be
        formatted, if diff_only is True)
    Raises:
        Exception: if the PR file list or any file's contents cannot be
        fetched from the GitHub API.
    """
    # NOTE(review): check_python_version is defined elsewhere in this package.
    check_python_version()
    print(f"running wool for {github.pr_url}")
    # get files from PR
    files = requests.get(github.pr_files_url, headers=github.headers).json()
    if not isinstance(files, list):
        # An error response is a dict, not a list of file entries.
        print(files)
        raise Exception("Unable to get PR files")
    python_files = [f for f in files if f["filename"].endswith(".py")]
    if not python_files:
        print("no python files to check")
        return {}
    files_str = "\n".join(" {}".format(f["filename"]) for f in files)
    print("checking files:\n{}".format(files_str))
    # run black on the files
    file_to_black = {}
    for file_info in python_files:
        filename = file_info["filename"]
        contents_url = file_info["contents_url"]
        contents_url_info = requests.get(contents_url, headers=github.headers).json()
        download_url = contents_url_info["download_url"]
        response = requests.get(download_url, headers=github.headers)
        if response.status_code != 200:
            raise Exception(
                # Truncate the URL at its "token" query parameter so the
                # access token is not leaked into logs.
                "Unable to get file `{}` at `{}`: got code {}.".format(
                    filename,
                    download_url[: download_url.index("token")],
                    response.status_code,
                )
            )
        raw_contents = response.text
        # "-" makes black read from stdin; stderr is discarded by the shell.
        black_command = (
            "black --diff - 2>/dev/null" if diff_only else "black - 2>/dev/null"
        )
        # NOTE(review): a single-element list with shell=True works on POSIX
        # (the first element becomes the shell command string).
        black_result = subprocess.run(
            [black_command],
            shell=True,
            input=raw_contents,
            encoding="utf-8",
            stdout=subprocess.PIPE,
        )
        # Only record files whose formatted output differs from the original.
        if black_result.stdout != raw_contents:
            file_to_black[filename] = black_result.stdout
    return file_to_black
def comment_on_pr(github=None):
    """
    Comment on the PR with the formatting that should be fixed.

    Runs black in diff mode over the PR's Python files, assembles all diffs
    into one comment body, and either posts a new comment or edits the
    previously posted wool comment (identified via find_old_comment).

    Args:
        github (GitHubInfo): connection info; built from the "pull_request"
            event payload when not provided.

    Exits with status 1 (failing the wool check) when any diff was found.
    """
    github = github or GitHubInfo(event_type="pull_request")
    black_output = run_black(github, diff_only=True)
    output = []
    write = output.append
    lint_success = True  # switch to failure if diff found
    for filename, black_diff in black_output.items():
        # Drop the two-line "--- <stdin>" / "+++ <stdout>" header black emits.
        # (Bug fix: the original rebound the name `black_output` here,
        # shadowing the dict being iterated.)
        diff_body = "\n".join(black_diff.split("\n")[2:])
        if diff_body:
            lint_success = False
            write("--- {}".format(filename))
            write("+++ blackened")
            write(diff_body)
    full_output = "\n".join(output)
    comment_body = black_comment_text(full_output)
    comments_info = requests.get(github.comments_url, headers=github.headers).json()
    old_comment = find_old_comment(comments_info)
    if not old_comment:
        response = requests.post(
            github.comments_url, json={"body": comment_body}, headers=github.headers
        )
        if response.status_code != 201:
            print("failed to write comment", file=sys.stderr)
            print(response.json(), file=sys.stderr)
    else:
        old_comment_url = old_comment.get("url")
        response = requests.patch(
            old_comment_url, json={"body": comment_body}, headers=github.headers
        )
        if response.status_code != 200:
            print("failed to edit comment", file=sys.stderr)
            print(response.json(), file=sys.stderr)
    if lint_success:
        print("Nothing to report!")
    else:
        # write output in terminal in addition to commenting
        print(f"\nBlack output:\n{full_output}\n")
        exit(1)  # fail the wool check
def commit_on_pr():
    """
    Create a commit on the PR to fix the formatting.

    Triggered by a PR comment.  Only acts when the comment body is one of
    ASK_FOR_FORMAT_COMMIT.  Workflow: run black, find the PR's latest commit
    and its branch, create a new git tree with the formatted files, create a
    commit pointing at that tree, move the branch ref to the new commit, then
    re-run comment_on_pr to refresh the status.

    NOTE(review): the GitHub API URLs below hard-code the repository
    "paulineribeyre/tests" -- presumably leftover test wiring; verify before
    using against another repo.
    """
    github = GitHubInfo(event_type="issue")
    # check if the comment is asking wool to format the code
    comment_contents = github.payload["comment"]["body"]
    if comment_contents.lower() not in ASK_FOR_FORMAT_COMMIT:
        return
    # diff_only=False: we want the fully formatted file contents to commit.
    black_output = run_black(github, diff_only=False)
    if not black_output:
        print("No changes to commit")
        return
    # get latest commit on the PR
    commits_url = "https://api.github.com/repos/paulineribeyre/tests/git/commits"
    pr_info = requests.get(github.pr_url, headers=github.headers).json()
    latest_commit_sha = pr_info["head"]["sha"]
    response = requests.get(
        commits_url + "/" + latest_commit_sha, headers=github.headers
    )
    if response.status_code != 200:
        print("failed to get latest commit info", file=sys.stderr)
        print(response.json(), file=sys.stderr)
        return
    latest_commit_tree_sha = response.json()["tree"]["sha"]
    # get branch for this commit
    branch_url = "https://api.github.com/repos/paulineribeyre/tests/commits/{}/branches-where-head".format(
        latest_commit_sha
    )
    # endpoint "branches-where-head" is in beta
    headers = github.headers.copy()
    headers["Accept"] = "application/vnd.github.groot-preview+json"
    response = requests.get(branch_url, headers=headers)
    if response.status_code != 200:
        print("failed to get commit branch", file=sys.stderr)
        print(response.json(), file=sys.stderr)
        return
    branches = response.json()
    # Only proceed when the commit is the head of exactly one branch;
    # otherwise we cannot tell which ref to update.
    if len(branches) != 1:
        if len(branches) > 1:
            print(
                "Commit {} is the head of several branches. I don't know which one to update - exiting early".format(
                    latest_commit_sha
                )
            )
        else:
            print(
                "Commit {} is not the head of the branch. Assuming a new commit has been pushed - exiting early".format(
                    latest_commit_sha
                )
            )
        return
    branch_name = branches[0]["name"]
    # create new tree. it contains the formatted files
    trees_url = "https://api.github.com/repos/paulineribeyre/tests/git/trees"
    new_tree_body = {
        "tree": [
            {"content": contents, "path": filename, "mode": "100644", "type": "blob"}
            for filename, contents in black_output.items()
        ],
        "base_tree": latest_commit_tree_sha,
    }
    response = requests.post(trees_url, headers=github.headers, json=new_tree_body)
    if response.status_code != 201:
        print("failed to create new tree", file=sys.stderr)
        print(response.json(), file=sys.stderr)
        return
    new_tree_sha = response.json()["sha"]
    # create new commit
    new_commit_body = {
        "tree": new_tree_sha,
        "parents": [latest_commit_sha],
        "message": "Wool auto formatting",
    }
    response = requests.post(commits_url, headers=github.headers, json=new_commit_body)
    if response.status_code != 201:
        print("failed to create new commit", file=sys.stderr)
        print(response.json(), file=sys.stderr)
        return
    new_commit_sha = response.json()["sha"]
    # add the commit to the branch
    refs_url = (
        "https://api.github.com/repos/paulineribeyre/tests/git/refs/heads/"
        + branch_name
    )
    new_ref_body = {"sha": new_commit_sha}
    response = requests.patch(refs_url, headers=github.headers, json=new_ref_body)
    if response.status_code != 200:
        print("failed to create new ref", file=sys.stderr)
        print(response.json(), file=sys.stderr)
        return
    print("Pushed commit {} to branch {}".format(new_commit_sha, branch_name))
    # manually trigger comment update and status check, since actions
    # in workflows do not trigger new workflow runs
    sleep(3)  # wait for the commit to show up in GitHub...
    comment_on_pr(github)
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
6738,
640,
1330,
3993,
198,
198,
11748,
7007,
628,
198,
46224,
40086,
796,
357,
198,
220,
220,
220,
366,
1212,
33313,
2912,
373,
7560,
633... | 2.423775 | 3,693 |
#
# Quadroscope - entry point
#
# The four camera system for raspberries.
#
# NOTE(review): this is Python 2 code (`import thread`, `picamera`).
import os, os.path, sys, json, time, thread, datetime, logging
import picamera
logging.basicConfig(filename='camera.log', level=logging.DEBUG)
from quadlib.utils import log
from quadlib import convert
from quadlib.updater import Updater
from quadlib.utils.gpio import Gpio
from quadlib import client
from quadlib.CameraWrapper import CameraWrapper
# Directory containing this script; all config paths are relative to it.
root = os.path.dirname(__file__) + '/'
log('[startup] ----------------------------------------', time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.localtime()))
log('[root] ', root)
log('[__file__] ', __file__)
# Read the settings from the `config` and `percamconfig` folders.
# settings.json stores generic data:
# - credentials for the other pi's (should be the same for every pi)
# - upload path: this is where the employees will upload the pictures made
# - ssh path: this is where the boss will copy the most recent code files.
# GPIO:
# triggering ports for writing settings and making a picture
# - gpioBossTriggerPort: the GPIO pin on which the trigger is set
# - gpioHigh: once done with uploading settings, boss triggers this port to let the others know to make a pic
# - gpioEmployeeTriggerPort: if this is an employee, it will listen on this port for the shooting trigger
settings = json.load(open(root+'config/settings.json'))
# If file exists, this is the boss.
boss = os.path.isfile(root+'percamconfig/boss')
# If for some reason one of the cameras are flipped, set these files.
vflip = os.path.isfile(root+'percamconfig/vflip')
hflip = os.path.isfile(root+'percamconfig/hflip')
# The camera number.
camerano = open(root+'percamconfig/camerano', 'r').read().strip('\n')
debug = False
push = True
# FLAGS:
# -- debug: passed through to Updater below.
# NOTE(review): `push` is set by --nopush but never read afterwards; the
# boss branch calls updater.push() unconditionally -- confirm intent.
for arg in sys.argv:
    if arg == '--debug': debug = True
    if arg == '--restartall':
        updater = Updater(camerano, boss, False)
        updater.restart_employees()
        sys.exit()
    if arg == '--update':
        updater = Updater(camerano, boss, True)
        sys.exit()
    if arg == '--status':
        updater = Updater(camerano, boss, True)
        updater.status()
        sys.exit()
    if arg == '--nopush':
        push = False
updater = Updater(camerano, boss, debug)
if boss:
    updater.push()
log("[root] Camera number(change it in camerano file): " + str(camerano))
gpio = Gpio(settings, boss)
picam_object = picamera.PiCamera()
# NOTE(review): hflip is set to False and then immediately overwritten by
# the percamconfig value below -- the first assignment is redundant.
picam_object.hflip = False
picam_object.vflip = vflip
picam_object.hflip = hflip
camera = CameraWrapper(picam_object, updater, gpio, boss, camerano)
# show user that we are starting
gpio.blinkCamera(4)
log( "[listener] listening on port " + str(gpio.port))
# start camera loop
if boss:
    from quadlib import gui
    gui.main(settings, boss, updater, camera)
    # Restart all the other cameras by default.
    updater = Updater(camerano, boss, False)
    updater.restart_employees()
else:
    updater.pull()
    client.camera_loop( updater, gpio, camera )
| [
2,
198,
2,
220,
220,
48447,
29982,
532,
5726,
966,
198,
2,
198,
2,
220,
220,
383,
1440,
4676,
1080,
329,
374,
5126,
20853,
13,
198,
2,
220,
220,
220,
198,
198,
11748,
28686,
11,
28686,
13,
6978,
11,
25064,
11,
33918,
11,
640,
11... | 2.711033 | 1,142 |
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing permissions and
# limitations under the License.
import smtplib
from oslo_config import cfg
from oslo_log import log
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class get_smtp_client(object):
"""This will construct an SMTP client given the options configured
in storyboard.conf, and return it. If authentication options are
provided, it will attempt to use these to connect to the server.
"""
| [
2,
15069,
357,
66,
8,
1853,
30446,
15503,
12,
11869,
446,
7712,
5834,
11,
406,
13,
47,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
239... | 3.547445 | 274 |
#!/usr/bin/env python
# Serve a pyws example over Twisted on TCP port 8000.
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.server import Site
import os, sys
# Make the parent directory importable so `server` can be found.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pyws.adapters._twisted import serve
from server import server
# NOTE(review): `Simple` is not defined or imported anywhere in this file,
# so this line raises NameError at runtime.  Presumably the intent was to
# build the site via the imported `serve`/`server` objects -- confirm
# against the pyws examples before running.
#noinspection PyUnresolvedReferences
reactor.listenTCP(8000, Site(Simple()))
#noinspection PyUnresolvedReferences
reactor.run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
19074,
13,
37675,
1330,
21905,
198,
6738,
19074,
13,
12384,
13,
31092,
1330,
20857,
198,
6738,
19074,
13,
12384,
13,
15388,
1330,
14413,
198,
198,
11748,
28686,
11,
25064,
198... | 3.1 | 140 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
from multiprocessing import Pool as ProcessPool
from multiprocessing.dummy import Pool as ThreadPool
from common import timed
NUM_WORKERS = 8 # four cores
def fib(n):
    """Calculate the n-th element of Fibonacci."""
    # Iteratively roll the pair (F(k), F(k+1)) forward n-1 steps.
    prev, curr = 1, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return prev
# Warm-up/timing run of a single large fib computation.
timed(fib, 5000)
# One workload per worker: 1000, 2000, ..., NUM_WORKERS*1000.
ELEMENTS = [i * 1000 for i in range(1, NUM_WORKERS + 1)]
def fibs():
    """Calculate elements of Fibonacci in series (no parallelism)."""
    return list(map(fib, ELEMENTS))
def fibp(pool):
    """Calculate elements of Fibonacci in parallel using the given pool."""
    return [value for value in pool.map(fib, ELEMENTS)]
# Thread pool: CPU-bound work, so the GIL limits any speedup here.
with ThreadPool(NUM_WORKERS) as pool:
    timed(fibs)
    timed(fibp, pool)
# Process pool: separate interpreters give true parallelism for fib.
with ProcessPool(NUM_WORKERS) as pool:
    timed(fibs)
    timed(fibp, pool)
| [
6738,
18540,
305,
919,
278,
1330,
19850,
355,
10854,
27201,
198,
6738,
18540,
305,
919,
278,
13,
67,
13513,
1330,
19850,
355,
14122,
27201,
198,
6738,
2219,
1330,
28805,
198,
198,
41359,
62,
33249,
4877,
796,
807,
220,
1303,
1440,
21758... | 2.448387 | 310 |
"""
Communicate with a Keysight 3458A digital multimeter.
"""
from . import equipment
from .dmm import DMM
@equipment(manufacturer=r'Keysight', model=r'3458A')
| [
37811,
198,
30813,
5344,
351,
257,
26363,
432,
513,
29334,
32,
4875,
1963,
16912,
13,
198,
37811,
198,
6738,
764,
1330,
5112,
198,
6738,
764,
67,
3020,
1330,
360,
12038,
628,
198,
31,
4853,
4667,
7,
48119,
15051,
28,
81,
6,
40729,
4... | 3.056604 | 53 |
import time
import torch
from queue import Queue
import numpy as np
def binary_accuracy(preds, y):
    """
    Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
    """
    # Threshold sigmoid outputs at 0.5 to get hard 0/1 predictions.
    hard_preds = torch.sigmoid(preds).round()
    matches = (hard_preds == y).float()  # float so the mean is fractional
    return matches.sum() / len(matches)
| [
11748,
640,
198,
11748,
28034,
198,
6738,
16834,
1330,
4670,
518,
198,
11748,
299,
32152,
355,
45941,
198,
198,
4299,
13934,
62,
4134,
23843,
7,
28764,
82,
11,
331,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
16409,
9922,
583... | 2.918919 | 148 |
## cover.py
import array
import numpy as np
import numpy.typing as npt
import scipy.sparse as sp
from numpy.typing import ArrayLike
from scipy.sparse import csc_matrix, diags
from scipy.sparse.csgraph import minimum_spanning_tree,connected_components
from .distance import *
from .distance import is_dist_like
from .utility import find_where, cartesian_product, inverse_choose, rank_comb2, unrank_comb2
from .dimred import neighborhood_graph, neighborhood_list
from .samplers import landmarks
from .polytope import sdist_to_boundary
## Type tools
## NOTE: put collections first before typing!
from collections.abc import *
from typing import *
from itertools import combinations, product
## --- Cover protocol type (its structural subtype) ---
#
# a *Cover* is some type supporting the mixins of the Mapping[T, Union[Sequence, ArrayLike]] type
# - Supports mapping mixins (__getitem__, __iter__, __len__, keys, items, values)
# - The key type T = TypeVar('T') can be any type; i.e. any index set can be used
# - If value is a Sequence type => supports mixins (__getitem__, __len__, __contains__, __iter__, __reversed__, .index(), and .count()), and is sorted (precondition)!
# - If value is a ArrayLike type => .index() and .count() don't exist, np.searchsorted() and np.sum(...) are used instead
# A cover has the postcondition that, upon finishing its initialization from __init__(...), it must be a *valid* cover, i.e.
# if the space covers *n* points, then *validate_cover(n, cover)* must be true.
#
# To support constructing generic partitions of unity via bump functions, covers can optionally support bump functions
# [cover].bump(X: ArrayLike, index: T) -> yields *normalized* distance of X to set T, such that
# 1. d(x,T) >= 0.0 <=> (non-negative)
# 2. d(x,T) <= 1.0 <=> (x is contained within set T)
# The bump function is only needed is the partition of unity if one is not explicitly provided as a sparse matrix.
IndexArray = Union[Sequence, ArrayLike]
T = TypeVar('T')
@runtime_checkable
# def set_distance(self, X: ArrayLike, index: T): ...
## Minimally, one must implement keys(), __getitem__()
## Must implement set_distance or set_barycenter....
def mollify(x: float, method: Optional[str] = "identity"):
  '''
  Applies a mollifier to modify the shape of 'x' (typically the result of a bump function).

  All negative entries of 'x' are first clipped to 0, even if a Callable is given.
  Choices for 'method' include:
    method \in ['identity' (aliases: 'linear', 'triangular'), 'quadratic', 'cubic',
                'quartic', 'quintic', 'gaussian', 'logarithmic']
  Alternatively, 'method' may be an arbitrary Callable applied to the clipped values.

  Parameters:
    x := scalar or array-like of bump function values.
    method := name of a built-in mollifier, or an arbitrary Callable.
  Returns:
    mollified values, same shape as 'x'.
  Raises:
    ValueError: if 'method' is neither a recognized name nor a Callable.
  '''
  x = np.maximum(x, 0.0) # truncate to only be positive!
  if method == "triangular" or method == "linear" or method == "identity":
    s = x
  elif method == "quadratic":
    s = x**2
  elif method == "cubic":
    s = x**3
  elif method == "quartic":
    s = x**4
  elif method == "quintic":
    s = x**5
  elif method == "gaussian":
    # Vectorized exp(-1/d^2) with the convention d == 0 -> 0. Since
    # exp(-inf) == 0.0, suppressing the divide-by-zero warning reproduces the
    # previous per-element Python loop in O(1) numpy calls.
    with np.errstate(divide="ignore"):
      s = np.exp(-1.0 / np.square(np.asarray(x, dtype=float)))
  elif method == "logarithmic":
    s = np.log(1.0+x)
  elif isinstance(method, Callable):
    s = method(x)
  else:
    raise ValueError("Invalid bump function specified.")
  return(s)
## This is just a specialized ball cover w/ a fixed radius
class IntervalCover(Cover):
'''
A Cover is *CoverLike*
Cover -> divides data into subsets, satisfying covering property, retains underlying neighbor structures + metric (if any)
Parameters:
a :=
n_sets := number of cover elements to use to create the cover
scale :=
bounds := (optional) bounds indicating the domain of the cover. Can be given as a set of intervals (per column). Otherwise the bounding box is inferred from 'a'.
implicit := optional, whether to store all the subsets in a precomputed dictionary, or construct them on-demand. Defaults to the former.
'''
# This is custom to allow the representation to be implicit
# This is custom to allow the representation to be implicit
## Minimally, one must implement keys(), __getitem__(), and set_distance()
class CircleCover(Cover):
'''
1-dimensional circular cover over the interval [lb, ub] (defaults to [0, 2*pi])
This cover computes distances modulo the givens bounds
'''
def dist_to_boundary(P: npt.ArrayLike, x: npt.ArrayLike):
  '''
  Distance from a point to a polygon boundary.

  Given ordered vertices 'P' constituting the polygon boundary and a point 'x',
  determines the distance from 'x' to the boundary along the ray emanating from
  the centroid. Thin wrapper over sdist_to_boundary (note the swapped argument order).
  '''
  return sdist_to_boundary(x, P)
# B = Polygon(P)
# c = B.centroid
# ## direction away from centroid
# v = np.array(x) - np.array(c.coords)
# v = v / np.linalg.norm(v)
# ## max possible distance away
# xx, yy = B.minimum_rotated_rectangle.exterior.coords.xy
# max_diam = np.max(np.abs(np.array(xx) - np.array(yy)))
# ## minimize convex function w/ golden section search
# dB = lambda y: B.boundary.distance(Point(np.ravel(c + y*v)))
# y_opt = golden(dB, brack=(0, max_diam))
# ## return final distance
# eps = np.linalg.norm(np.array(c.coords) - np.ravel(c + y_opt*v))
# dc = np.linalg.norm(np.array(x) - np.array(c.coords))
# return(eps, dc)
## TODO: Use code below to partition data set, then use multi-D "floodfill" type search
## to obtain O(n + k) complexity. Could also bound how many sets overlap and search all k of those
## to yield O(nk) vectorized
## breaks = [bin_width[d]*np.array(list(range(n_sets[d]))) for d in range(D)]
## np.reshape([np.digitize(x[:,d], breaks[d]) for d in range(D)], x.shape)
# prop_overlap <- self$percent_overlap/100
# base_interval_length <- filter_len/self$number_intervals
# interval_length <- base_interval_length + (base_interval_length * prop_overlap)/(1.0 - prop_overlap)
# eps <- (interval_length/2.0) + sqrt(.Machine$double.eps) ## ensures each point is in the cover
# set_bounds <- do.call(rbind, lapply(index, function(idx_str){
# idx <- strsplit(substr(idx_str, start=2L, stop=nchar(idx_str)-1L), split = " ")[[1]]
# centroid <- filter_min + ((as.integer(idx)-1L)*base_interval_length) + base_interval_length/2.0
# return(c(centroid - eps, centroid + eps))
# }))
## A Partition of unity is function mapping a set of points to weighted vectors
## Parameters:
## - cover := CoverLike that has a .bump() function
## - mollifer := string or Callable indicating the choice of mollifier
## - weights := not implemented
## - normalize := whether to request the bump functions normalize their output. Defaults to true.
## Returns:
## - (m x J) sparse matrix whose row contains the result of the given 'mollifier' applied to the cover's bump function
| [
2235,
3002,
13,
9078,
198,
11748,
7177,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
299,
32152,
13,
774,
13886,
355,
299,
457,
198,
11748,
629,
541,
88,
13,
82,
29572,
355,
599,
198,
6738,
299,
32152,
13,
774,
13886,
1330,
15690,
... | 2.978066 | 2,234 |
if __name__ == '__main__':
    import argparse

    # Parse the single CLI option and kick off MNIST training.
    cli = argparse.ArgumentParser()
    cli.add_argument('--storage_bucket', type=str)
    options = cli.parse_args()
    mnisttrain(options.storage_bucket)
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1330,
1822,
29572,
628,
220,
220,
220,
30751,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
220,
220,
220,
30751,
13,
2860,
62,
49140,
10786,
43... | 2.64557 | 79 |
import sys
import time
import netlink
import numpy as np
import utils
from read_bfee import BeamformReader
from realtime_graph import RealtimeGraph
if __name__ == "__main__":
rxChains = "ABC"
#utils.configure_rx_chains(rxChains)
gType = "default"
if len(sys.argv) == 2:
gType = sys.argv[1]
graph = RealtimeGraph(gType)
sock = netlink.open_socket()
bfRead = BeamformReader()
while True:
try:
payload = netlink.recv_from_socket(sock)
csiEntry = bfRead.read_bf_entry(payload)
graph.update(csiEntry)
except OSError:
print("error")
netlink.close_socket(sock)
| [
11748,
25064,
198,
11748,
640,
198,
198,
11748,
2010,
8726,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
3384,
4487,
198,
6738,
1100,
62,
19881,
1453,
1330,
25855,
687,
33634,
198,
6738,
1103,
2435,
62,
34960,
1330,
6416,
2435,
37065,
... | 2.222591 | 301 |
from .create import CreateTodoForm
| [
6738,
764,
17953,
1330,
13610,
51,
24313,
8479,
198
] | 3.888889 | 9 |
"""
Functions for monkey patching functions in other modules. See https://en.wikipedia.org/wiki/Monkey_patch
There are other excellent libraries for this, which unfortunately don't excactly match our use case:
- https://github.com/christophercrouzet/gorilla
- https://github.com/iki/monkeypatch
- https://github.com/theatlantic/python-monkey-business
- https://bitbucket.org/schesis/ook
"""
import functools
import sys
import typing
REPLACED_FUNCTIONS: {str: str} = {}
"""
A list of all functions that have been replaced or wrapped by other functions, for documentation purposes
The dictionary maps the module and name of the original function to a tuple to the module and name of the new function
"""
def patch(original_function: typing.Callable) -> typing.Callable:
    """
    A decorator for replacing a function in another module

    Example:
        >>> # in some_package.some_module:
        ... def some_function(x):
        ...     return x + 1
        >>> some_package.some_module.some_function(1)
        2
        >>> @patch(some_package.some_module.some_function)
        ... def new_function(x):
        ...     return x + 2
        >>> some_package.some_module.some_function(1)
        3
        >>> # equivalent:
        >>> patch(some_package.some_module.some_function)(lambda x: x + 2)

    Args:
        original_function: The function or method to patch

    Returns: The replaced function
    """

    # Bug fix: the original body returned `decorator` without ever defining
    # it, so calling patch() raised NameError. The decorator below implements
    # the contract documented above.
    def decorator(new_function: typing.Callable) -> typing.Callable:
        # Rebind the attribute in the module that owns the original function.
        module = sys.modules[original_function.__module__]
        setattr(module, original_function.__name__, new_function)
        # Record the replacement for documentation purposes.
        REPLACED_FUNCTIONS[
            f"{original_function.__module__}.{original_function.__qualname__}"
        ] = f"{new_function.__module__}.{new_function.__qualname__}"
        return new_function

    return decorator
def wrap(original_function: typing.Callable) -> typing.Callable:
    """
    A decorator for wrapping a function in another module

    Example:
        >>> # in some_package.some_module:
        ... def some_function(x):
        ...     return x + 1
        >>> some_package.some_module.some_function(1)
        2
        >>> @wrap(some_package.some_module.some_function)
        ... def new_function(original_function, x):
        ...     return original_function(x) + 1
        >>> some_package.some_module.some_function(1)
        3

    Args:
        original_function: The function or method to wrap

    Returns: The wrapped function
    """

    # Bug fix: the original body returned `decorator` without ever defining
    # it, so calling wrap() raised NameError. The decorator below implements
    # the contract documented above.
    def decorator(wrapper_function: typing.Callable) -> typing.Callable:
        # The replacement receives the original function as its first
        # argument, followed by the call's own arguments.
        @functools.wraps(original_function)
        def wrapped(*args, **kwargs):
            return wrapper_function(original_function, *args, **kwargs)

        module = sys.modules[original_function.__module__]
        setattr(module, original_function.__name__, wrapped)
        # Record the wrap for documentation purposes.
        REPLACED_FUNCTIONS[
            f"{original_function.__module__}.{original_function.__qualname__}"
        ] = f"{wrapper_function.__module__}.{wrapper_function.__qualname__}"
        return wrapped

    return decorator
37811,
198,
24629,
2733,
329,
21657,
8529,
278,
5499,
287,
584,
13103,
13,
4091,
3740,
1378,
268,
13,
31266,
13,
2398,
14,
15466,
14,
9069,
2539,
62,
17147,
198,
1858,
389,
584,
6275,
12782,
329,
428,
11,
543,
12716,
836,
470,
2859,
... | 2.979412 | 680 |
import logging
from types import TracebackType
from typing import Optional, cast
from common.exception_enums import ExceptionGroup
from common.exception_handler import ExceptionHandler
from dask.utils import funcname
from distributed import Scheduler, Worker
from distributed.diagnostics.plugin import SchedulerPlugin, WorkerPlugin
from panoramic.cli.datacol.instrumentation.measurement import Measure
| [
11748,
18931,
198,
6738,
3858,
1330,
34912,
1891,
6030,
198,
6738,
19720,
1330,
32233,
11,
3350,
198,
198,
6738,
2219,
13,
1069,
4516,
62,
268,
5700,
1330,
35528,
13247,
198,
6738,
2219,
13,
1069,
4516,
62,
30281,
1330,
35528,
25060,
19... | 4.229167 | 96 |
t=0
s=0
for i in range(1,500):
t=i
for j in range(1,t):
if(t%j==0):
s=s+j
if s==i:
print(s,end=" ")
s=0
t=0 | [
83,
28,
15,
201,
198,
82,
28,
15,
201,
198,
1640,
1312,
287,
2837,
7,
16,
11,
4059,
2599,
201,
198,
220,
220,
220,
256,
28,
72,
201,
198,
220,
220,
220,
329,
474,
287,
2837,
7,
16,
11,
83,
2599,
201,
198,
220,
220,
220,
220,... | 1.386555 | 119 |
# Generated by Django 3.1.8 on 2021-07-20 00:47
import dictators.dictators_game.models
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
23,
319,
33448,
12,
2998,
12,
1238,
3571,
25,
2857,
198,
198,
11748,
20591,
13,
11600,
2024,
62,
6057,
13,
27530,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 3.095238 | 42 |
from django.conf.urls import include, url
from django.contrib import messages as django_messages
from django.templatetags.static import static
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
import swapper
from wagtail import VERSION as WAGTAIL_VERSION
from wagtail.admin import messages
from wagtail.admin.action_menu import ActionMenuItem
from wagtail.admin.menu import MenuItem
from wagtail.core import hooks
from wagtail_review import admin_urls
from wagtail_review.forms import get_review_form_class, ReviewerFormSet
Review = swapper.load_model('wagtail_review', 'Review')
@hooks.register('register_admin_urls')
# Replace 'submit for moderation' action with 'submit for review'
@hooks.register('construct_page_action_menu')
hooks.register('after_create_page', handle_submit_for_review)
hooks.register('after_edit_page', handle_submit_for_review)
@hooks.register('register_admin_menu_item')
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
11,
19016,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
355,
42625,
14208,
62,
37348,
1095,
198,
6738,
42625,
14208,
13,
11498,
489,
265,
316,
3775,
13,
12708,
1330,
9037,
... | 3.266667 | 315 |
"""
Get API information encoded in C files.
See ``find_function`` for how functions should be formatted, and
``read_order`` for how the order of the functions should be
specified.
"""
import sys, os, re
import md5
import textwrap
from os.path import join
__docformat__ = 'restructuredtext'
# The files under src/ that are scanned for API functions
API_FILES = [join('multiarray', 'methods.c'),
join('multiarray', 'arrayobject.c'),
join('multiarray', 'flagsobject.c'),
join('multiarray', 'descriptor.c'),
join('multiarray', 'iterators.c'),
join('multiarray', 'getset.c'),
join('multiarray', 'number.c'),
join('multiarray', 'sequence.c'),
join('multiarray', 'ctors.c'),
join('multiarray', 'convert.c'),
join('multiarray', 'shape.c'),
join('multiarray', 'item_selection.c'),
join('multiarray', 'convert_datatype.c'),
join('multiarray', 'arraytypes.c.src'),
join('multiarray', 'multiarraymodule.c'),
join('multiarray', 'scalartypes.c.src'),
join('multiarray', 'scalarapi.c'),
join('multiarray', 'calculation.c'),
join('multiarray', 'usertypes.c'),
join('multiarray', 'refcount.c'),
join('multiarray', 'conversion_utils.c'),
join('multiarray', 'buffer.c'),
join('umath', 'ufunc_object.c'),
join('umath', 'loops.c.src'),
]
THIS_DIR = os.path.dirname(__file__)
API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES]
def find_functions(filename, tag='API'):
    """
    Scan the file, looking for tagged functions.

    Assuming ``tag=='API'``, a tagged function looks like::

        /*API*/
        static returntype*
        function_name(argtype1 arg1, argtype2 arg2)
        {
        }

    where the return type must be on a separate line, the function
    name must start the line, and the opening ``{`` must start the line.
    An optional documentation comment in ReST format may follow the tag,
    as in::

        /*API
         This function does foo...
         */

    Returns a list of Function objects (defined elsewhere in this module),
    one per tagged declaration found.
    """
    # NOTE(review): fo is only closed on the success path; the bare except
    # below re-raises, leaking the handle on a parse failure.
    fo = open(filename, 'r')
    functions = []
    # Accumulators for the declaration currently being parsed.
    return_type = None
    function_name = None
    function_args = []
    doclist = []
    # Line-by-line state machine over the C source.
    SCANNING, STATE_DOC, STATE_RETTYPE, STATE_NAME, STATE_ARGS = range(5)
    state = SCANNING
    tagcomment = '/*' + tag
    for lineno, line in enumerate(fo):
        try:
            line = line.strip()
            if state == SCANNING:
                # Looking for the opening /*API tag.
                if line.startswith(tagcomment):
                    if line.endswith('*/'):
                        state = STATE_RETTYPE
                    else:
                        state = STATE_DOC
            elif state == STATE_DOC:
                # Inside the optional documentation comment.
                if line.startswith('*/'):
                    state = STATE_RETTYPE
                else:
                    line = line.lstrip(' *')
                    doclist.append(line)
            elif state == STATE_RETTYPE:
                # first line of declaration with return type
                m = re.match(r'NPY_NO_EXPORT\s+(.*)$', line)
                if m:
                    line = m.group(1)
                return_type = line
                state = STATE_NAME
            elif state == STATE_NAME:
                # second line, with function name
                m = re.match(r'(\w+)\s*\(', line)
                if m:
                    function_name = m.group(1)
                else:
                    raise ParseError(filename, lineno+1,
                                     'could not find function name')
                function_args.append(line[m.end():])
                state = STATE_ARGS
            elif state == STATE_ARGS:
                if line.startswith('{'):
                    # finished
                    fargs_str = ' '.join(function_args).rstrip(' )')
                    fargs = split_arguments(fargs_str)
                    f = Function(function_name, return_type, fargs,
                                 '\n'.join(doclist))
                    functions.append(f)
                    # Reset the accumulators for the next declaration.
                    return_type = None
                    function_name = None
                    function_args = []
                    doclist = []
                    state = SCANNING
                else:
                    # Arguments continue across multiple lines.
                    function_args.append(line)
        except:
            # Report where parsing failed, then propagate (Python 2 print).
            print filename, lineno+1
            raise
    fo.close()
    return functions
def read_order(order_file):
    """
    Read the order of the API functions from a file.

    Comments can be put on lines starting with #

    Returns a dict mapping each non-comment line (stripped) to its 0-based
    position among the non-comment lines.
    """
    order = {}
    i = 0
    # Bug fix: 'with' guarantees the handle is closed even if iteration
    # raises; the original leaked the file object on error.
    with open(order_file, 'r') as fo:
        for line in fo:
            line = line.strip()
            if not line.startswith('#'):
                order[line] = i
                i += 1
    return order
def add_api_list(offset, APIname, api_list,
                 module_list, extension_list, init_list):
    """Add the API function declarations to the appropriate lists for use in
    the headers.

    For each function, appends the exported prototype to module_list, the
    table-indexing #define to extension_list, and the init-table entry to
    init_list.
    """
    for position, func in enumerate(api_list):
        index = offset + position
        signature = func.argtypes_string()
        # Exported prototype for the module header.
        module_list.append(
            "NPY_NO_EXPORT %s %s \\\n (%s);" % (func.return_type, func.name,
                                                signature))
        # Macro that dereferences slot `index` of the API table.
        extension_list.append(
            "#define %s \\\n (*(%s (*)(%s)) \\\n %s[%d])" % (
                func.name, func.return_type, signature, APIname, index))
        # Entry for the API initialization table.
        init_list.append(" (void *) %s," % func.name)
if __name__ == '__main__':
    # Script entry point.
    # NOTE(review): main() is not visible in this chunk — presumably defined
    # elsewhere in this module; confirm before running standalone.
    main()
| [
37811,
198,
3855,
7824,
1321,
30240,
287,
327,
3696,
13,
198,
198,
6214,
7559,
19796,
62,
8818,
15506,
329,
703,
5499,
815,
307,
39559,
11,
290,
198,
15506,
961,
62,
2875,
15506,
329,
703,
262,
1502,
286,
262,
5499,
815,
307,
198,
2... | 1.935902 | 2,933 |
"""
Base class for RoBO algorithms.
"""
import george
import numpy
from orion.algo.base import BaseAlgorithm
from robo.acquisition_functions.ei import EI
from robo.acquisition_functions.lcb import LCB
from robo.acquisition_functions.log_ei import LogEI
from robo.acquisition_functions.pi import PI
from robo.initial_design import init_latin_hypercube_sampling
from robo.maximizers.differential_evolution import DifferentialEvolution
from robo.maximizers.random_sampling import RandomSampling
from robo.maximizers.scipy_optimizer import SciPyOptimizer
from robo.priors.default_priors import DefaultPrior
from robo.solver.bayesian_optimization import BayesianOptimization
def build_bounds(space):
    """
    Build the lower/upper bound arrays of an optimization space.

    Parameters
    ----------
    space: ``orion.algo.space.Space``
        Search space for the optimization.
    """
    lows = []
    highs = []
    for dimension in space.values():
        lo, hi = dimension.interval()
        # Only scalar (or shape-[1]) dimensions are supported here.
        assert not dimension.shape or dimension.shape == [1]
        lows.append(lo)
        highs.append(hi)
    return [numpy.array(lows), numpy.array(highs)]
def build_kernel(lower, upper):
    """
    Build a Matern-5/2 kernel for GPs over the given box bounds.

    Parameters
    ----------
    lower: numpy.ndarray (D,)
        The lower bound of the search space
    upper: numpy.ndarray (D,)
        The upper bound of the search space
    """
    assert upper.shape[0] == lower.shape[0], "Dimension miss match"
    assert numpy.all(lower < upper), "Lower bound >= upper bound"
    n_dims = lower.shape[0]
    # Unit initial length-scales, one per dimension.
    matern = george.kernels.Matern52Kernel(numpy.ones([n_dims]), ndim=n_dims)
    # Fixed covariance amplitude of 2.
    return 2 * matern
def infer_n_hypers(kernel):
    """Infer number of MCMC chains that should be used based on size of kernel"""
    n_hypers = 3 * len(kernel)
    # The sampler needs an even number of chains; round odd counts up.
    return n_hypers + (n_hypers % 2)
def build_prior(kernel):
    """Build default GP prior based on kernel"""
    # One hyper-parameter per kernel dimension, plus the noise term.
    n_params = len(kernel) + 1
    return DefaultPrior(n_params, numpy.random.RandomState(None))
def build_acquisition_func(acquisition_func, model):
    """
    Build acquisition function

    Parameters
    ----------
    acquisition_func: str
        Name of the acquisition function. Can be one of ``['ei', 'log_ei', 'pi', 'lcb']``.
    model: ``robo.models.base_model.BaseModel``
        Model used for the Bayesian optimization.
    """
    # Early-return dispatch on the acquisition function name.
    if acquisition_func == "ei":
        return EI(model)
    if acquisition_func == "log_ei":
        return LogEI(model)
    if acquisition_func == "pi":
        return PI(model)
    if acquisition_func == "lcb":
        return LCB(model)
    raise ValueError(
        "'{}' is not a valid acquisition function".format(acquisition_func)
    )
def build_optimizer(model, maximizer, acquisition_func):
    """
    General interface for Bayesian optimization for global black box
    optimization problems.

    Parameters
    ----------
    maximizer: str
        The optimizer for the acquisition function.
        Can be one of ``{"random", "scipy", "differential_evolution"}``
    acquisition_func:
        The instantiated acquisition function

    Returns
    -------
    Optimizer
    """
    # Resolve the maximizer class first; all three share the same signature.
    if maximizer == "random":
        maximizer_cls = RandomSampling
    elif maximizer == "scipy":
        maximizer_cls = SciPyOptimizer
    elif maximizer == "differential_evolution":
        maximizer_cls = DifferentialEvolution
    else:
        raise ValueError(
            "'{}' is not a valid function to maximize the "
            "acquisition function".format(maximizer)
        )
    max_func = maximizer_cls(acquisition_func, model.lower, model.upper, rng=None)
    # NOTE: Internal RNG of BO won't be used.
    # NOTE: Nb of initial points won't be used within BO, but rather outside
    bo = BayesianOptimization(
        lambda: None,
        model.lower,
        model.upper,
        acquisition_func,
        model,
        max_func,
        initial_points=None,
        rng=None,
        initial_design=init_latin_hypercube_sampling,
        output_path=None,
    )
    return bo
class RoBO(BaseAlgorithm):
    """
    Base class to wrap RoBO algorithms.
    Parameters
    ----------
    space: ``orion.algo.space.Space``
        Optimisation space with priors for each dimension.
    seed: None, int or sequence of int
        Seed to sample initial points and candidates points.
        Default: 0.
    n_initial_points: int
        Number of initial points randomly sampled. If new points
        are requested and less than `n_initial_points` are observed,
        the next points will also be sampled randomly instead of being
        sampled from the parzen estimators.
        Default: ``20``
    maximizer: str
        The optimizer for the acquisition function.
        Can be one of ``{"random", "scipy", "differential_evolution"}``.
        Defaults to 'random'
    acquisition_func: str
        Name of the acquisition function. Can be one of ``['ei', 'log_ei', 'pi', 'lcb']``.
    **kwargs:
        Arguments specific to each RoBO algorithms. These will be registered as part of
        the algorithm's configuration.
    """
    # Contract flags consumed by orion's space transformation machinery:
    # dimensions must arrive flattened, real-valued and linearly scaled.
    requires_type = "real"
    requires_dist = "linear"
    requires_shape = "flattened"
    @property
    def space(self):
        """Space of the optimizer"""
        return self._space
    @space.setter
    def space(self, space):
        """Setter of optimizer's space.
        Side-effect: Will initialize optimizer.
        """
        # Keep the previous (untransformed) space: `is_done` reads its
        # cardinality below.
        self._original = self._space
        self._space = space
        self._initialize()
    def _initialize_model(self):
        """Build model and register it as ``self.model``"""
        raise NotImplementedError()
    def build_acquisition_func(self):
        """Build and return the acquisition function."""
        return build_acquisition_func(self.acquisition_func, self.model)
    def _initialize(self):
        """Initialize the optimizer once the space is transformed"""
        self._initialize_model()
        self.robo = build_optimizer(
            self.model,
            maximizer=self.maximizer,
            acquisition_func=self.build_acquisition_func(),
        )
        # Re-seed after (re)building the optimizer so runs are reproducible.
        self.seed_rng(self.seed)
    @property
    def X(self):
        """Matrix containing trial points"""
        ref_point = self.space.sample(1, seed=0)[0]
        # Include BO duplicates so the model sees every observation; keep
        # only trials that already have a result.
        points = list(self._trials_info.values()) + self._bo_duplicates
        points = list(filter(lambda point: point[1] is not None, points))
        X = numpy.zeros((len(points), len(ref_point)))
        for i, (point, _result) in enumerate(points):
            X[i] = point
        return X
    @property
    def y(self):
        """Vector containing trial results"""
        points = list(self._trials_info.values()) + self._bo_duplicates
        points = list(filter(lambda point: point[1] is not None, points))
        y = numpy.zeros(len(points))
        for i, (_point, result) in enumerate(points):
            y[i] = result["objective"]
        return y
    def seed_rng(self, seed):
        """Seed the state of the random number generator.
        Parameters
        ----------
        seed: int
            Integer seed for the random number generator.
        """
        self.rng = numpy.random.RandomState(seed)
        # Derive independent sub-seeds for each downstream RNG consumer.
        rand_nums = self.rng.randint(1, 10e8, 4)
        if self.robo:
            self.robo.rng = numpy.random.RandomState(rand_nums[0])
            self.robo.maximize_func.rng.seed(rand_nums[1])
        if self.model:
            self.model.seed(rand_nums[2])
        # RoBO also draws from the global numpy RNG internally.
        numpy.random.seed(rand_nums[3])
    @property
    def state_dict(self):
        """Return a state dict that can be used to reset the state of the algorithm."""
        s_dict = super(RoBO, self).state_dict
        s_dict.update(
            {
                "rng_state": self.rng.get_state(),
                "global_numpy_rng_state": numpy.random.get_state(),
                "maximizer_rng_state": self.robo.maximize_func.rng.get_state(),
                "bo_duplicates": self._bo_duplicates,
            }
        )
        s_dict["model"] = self.model.state_dict()
        return s_dict
    def set_state(self, state_dict):
        """Reset the state of the algorithm based on the given state_dict
        :param state_dict: Dictionary representing state of an algorithm
        """
        super(RoBO, self).set_state(state_dict)
        self.rng.set_state(state_dict["rng_state"])
        numpy.random.set_state(state_dict["global_numpy_rng_state"])
        self.robo.maximize_func.rng.set_state(state_dict["maximizer_rng_state"])
        self.model.set_state(state_dict["model"])
        self._bo_duplicates = state_dict["bo_duplicates"]
    def suggest(self, num=None):
        """Suggest a `num`ber of new sets of parameters.
        Perform a step towards negative gradient and suggest that point.
        """
        # Once past the random warm-up phase this caps requests at one
        # point per call.
        num = min(num, max(self.n_initial_points - self.n_suggested, 1))
        samples = []
        candidates = []
        while len(samples) < num:
            if candidates:
                candidate = candidates.pop(0)
                if candidate:
                    self.register(candidate)
                    samples.append(candidate)
            elif self.n_observed < self.n_initial_points:
                # Warm-up: sample randomly until enough points are observed.
                candidates = self._suggest_random(num)
            else:
                candidates = self._suggest_bo(max(num - len(samples), 0))
            if not candidates:
                break
        return samples
    @property
    def is_done(self):
        """Whether the algorithm is done and will not make further suggestions.
        Return True, if an algorithm holds that there can be no further improvement.
        By default, the cardinality of the specified search space will be used to check
        if all possible sets of parameters has been tried.
        """
        if self.n_suggested >= self._original.cardinality:
            return True
        if self.n_suggested >= getattr(self, "max_trials", float("inf")):
            return True
        return False
| [
37811,
198,
14881,
1398,
329,
5564,
8202,
16113,
13,
198,
37811,
198,
11748,
4903,
3643,
198,
11748,
299,
32152,
198,
6738,
393,
295,
13,
282,
2188,
13,
8692,
1330,
7308,
2348,
42289,
198,
6738,
686,
2127,
13,
43561,
10027,
62,
12543,
... | 2.431529 | 4,250 |
from demo.my_logger import getLogger
testing()
| [
6738,
13605,
13,
1820,
62,
6404,
1362,
1330,
651,
11187,
1362,
628,
198,
198,
33407,
3419,
198
] | 2.941176 | 17 |
import tensorflow as tf
import numpy as np
def read_and_decode_batch(filename_queue, batch_size, capacity, min_after_dequeue):
    """Dequeue a shuffled batch of (label, ids, values) tensors from a TFRecord.

    Args:
        filename_queue: Filename Queue of the TFRecord.
        batch_size: How many records dequeued each time.
        capacity: The capacity of the queue.
        min_after_dequeue: Ensures a minimum amount of shuffling of examples.

    Returns:
        Tuple of the dequeued (batch_label, batch_ids, batch_values).
    """
    record_reader = tf.TFRecordReader()
    _, serialized = record_reader.read(filename_queue)
    shuffled = tf.train.shuffle_batch(
        [serialized],
        batch_size=batch_size, capacity=capacity,
        min_after_dequeue=min_after_dequeue)
    # The feature spec must stay consistent with the LibSVM -> TFRecord
    # conversion step.
    feature_spec = {
        "label": tf.FixedLenFeature([], tf.float32),
        "ids": tf.VarLenFeature(tf.int64),
        "values": tf.VarLenFeature(tf.float32),
    }
    parsed = tf.parse_example(shuffled, features=feature_spec)
    return parsed["label"], parsed["ids"], parsed["values"]
def sparse_tensor_to_train_batch(dense_label, dense_ids, dense_values):
    """Transform the dense ids and values to TF understandable inputs, and
    one-hot encode the labels.

    For instance, dense_ids of the form (with -1 marking end-of-row padding)
        [[1, 4, 6, -1],
         [2, 3, -1, -1], ...]
    becomes
        [[0, 1], [0, 4], [0, 6],
         [1, 2], [1, 3], ...]
    and dense_values of the form
        [[0.01, 0.23, 0.45, -1],
         [0.34, 0.25, -1, -1], ...]
    becomes
        [0.01, 0.23, 0.45, 0.34, 0.25, ...]

    Args:
        dense_label: Labels (binary, coercible to float).
        dense_ids: Padded sparse indices.
        dense_values: Padded sparse values.

    Returns:
        Tuple (one-hot label array, flattened [row, id] indices, flattened values).
    """
    indice_flatten = []
    values_flatten = []
    label_flatten = []
    # Bug fix: the original rebuilt list(dense_ids) / list(dense_values)
    # inside the row loop, making the transform O(rows^2); hoist them out.
    indice = list(dense_ids)
    values = list(dense_values)
    for index in range(dense_label.shape[0]):
        # One-hot encode the binary label: class 0 -> [1, 0], else [0, 1].
        if int(float(dense_label[index])) == 0:
            label_flatten.append([1.0, 0.0])
        else:
            label_flatten.append([0.0, 1.0])
        for j in range(len(indice[index])):
            if indice[index][j] == -1:
                # -1 marks the start of row padding; rest of the row is empty.
                break
            indice_flatten.append([index, indice[index][j]])
            values_flatten.append(values[index][j])
    return np.array(label_flatten), indice_flatten, values_flatten
def libsvm_data_read(input_filename):
    """Read all the data from the LibSVM file.

    Args:
        input_filename: Filename of the LibSVM file.

    Returns:
        Tuple of numpy arrays (labels, ids, values); labels are one-hot
        encoded as [1, 0] for class 0 and [0, 1] otherwise.
    """
    labels = []
    ids_all = []
    values_all = []
    # Bug fix: 'with' guarantees the file handle is closed even if a line
    # fails to parse (the original leaked the handle on error).
    with open(input_filename, 'r') as fin:
        for line in fin:
            data = line.split(' ')
            if int(float(data[0])) == 0:
                labels.append([1.0, 0.0])
            else:
                labels.append([0.0, 1.0])
            ids = []
            values = []
            for fea in data[1:]:
                # Each feature is "id:value"; renamed from `id` to avoid
                # shadowing the builtin.
                fid, fval = fea.split(':')
                ids.append(int(fid))
                values.append(float(fval))
            ids_all.append(ids)
            values_all.append(values)
    return np.array(labels), np.array(ids_all), np.array(values_all)
def libsvm_convert_sparse_tensor(array_ids, array_values):
    """Transform the contents into TF understandable formats, similar to
    sparse_tensor_to_train_batch() but without padding markers.

    Args:
        array_ids: Sparse indices, one row per example.
        array_values: Sparse values, one row per example.

    Returns:
        Tuple of (flattened [row, id] indices, flattened values).
    """
    flat_indices = []
    flat_values = []
    for row in range(array_ids.shape[0]):
        row_ids = array_ids[row]
        # Emit one (row, feature-id) pair per entry, in row order.
        for j in range(len(row_ids)):
            flat_indices.append([row, row_ids[j]])
            flat_values.append(array_values[row][j])
    return flat_indices, flat_values
def average_gradients(tower_grads):
    """Average gradients over all towers for each shared variable.

    Note that this function provides a synchronization point across all towers.

    Args:
        tower_grads: List of lists of (gradient, variable) tuples. The outer
            list is over individual gradients; the inner list is over the
            gradient calculation for each tower.

    Returns:
        List of (gradient, variable) pairs where the gradient has been
        averaged across all towers.
    """
    averaged = []
    # zip(*...) regroups the per-tower lists into per-variable tuples:
    # ((grad0_gpu0, var0_gpu0), ..., (grad0_gpuN, var0_gpuN))
    for per_var in zip(*tower_grads):
        # Stack each tower's gradient along a new leading 'tower' axis ...
        stacked = tf.concat(0, [tf.expand_dims(g, 0) for g, _ in per_var])
        # ... then reduce that axis away by averaging.
        mean_grad = tf.reduce_mean(stacked, 0)
        # The Variables are shared (redundant) across towers, so the first
        # tower's pointer stands in for all of them.
        averaged.append((mean_grad, per_var[0][1]))
    return averaged
| [
11748,
11192,
273,
11125,
355,
48700,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
201,
198,
4299,
1100,
62,
392,
62,
12501,
1098,
62,
43501,
7,
34345,
62,
36560,
11,
15458,
62,
7857,
11,
5339,
11,
949,
62,
8499,
62,... | 2.140709 | 2,793 |
from .hub_interface import * # noqa
from .model import * # noqa | [
6738,
764,
40140,
62,
39994,
1330,
1635,
220,
1303,
645,
20402,
198,
6738,
764,
19849,
1330,
1635,
220,
1303,
645,
20402
] | 3.095238 | 21 |
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.ec2.instancestatus import Status, Details
class Event(object):
"""
A status event for an instance.
:ivar type: The type of the event.
:ivar id: The ID of the event.
:ivar description: A string describing the reason for the event.
:ivar not_before: A datestring describing the earliest time for
the event.
:ivar not_after: A datestring describing the latest time for
the event.
"""
class Action(object):
"""
An action for an instance.
:ivar code: The code for the type of the action.
:ivar id: The ID of the event.
:ivar type: The type of the event.
:ivar description: A description of the action.
"""
class VolumeStatus(object):
"""
Represents an EC2 Volume status as reported by
DescribeVolumeStatus request.
:ivar id: The volume identifier.
:ivar zone: The availability zone of the volume
:ivar volume_status: A Status object that reports impaired
functionality that arises from problems internal to the instance.
:ivar events: A list of events relevant to the instance.
:ivar actions: A list of events relevant to the instance.
"""
class VolumeStatusSet(list):
"""
A list object that contains the results of a call to
DescribeVolumeStatus request. Each element of the
list will be an VolumeStatus object.
:ivar next_token: If the response was truncated by
the EC2 service, the next_token attribute of the
object will contain the string that needs to be
passed in to the next request to retrieve the next
set of results.
"""
| [
2,
15069,
357,
66,
8,
2321,
20472,
402,
28610,
265,
2638,
1378,
70,
28610,
265,
13,
2398,
14,
198,
2,
15069,
357,
66,
8,
2321,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
198,
2,
1439,
6923,
33876,
198,
2,
198,
2,
2448,
... | 3.305065 | 849 |
# type: ignore[misc]
"""yt-dlg module that contains util functions.
Attributes:
YOUTUBEDL_BIN (string): Youtube-dl binary filename.
"""
from __future__ import annotations
import locale
import math
import os
import subprocess
import sys
from pathlib import Path
from .info import __appname__
IS_WINDOWS = os.name == "nt"
YOUTUBEDL_BIN: str = "youtube-dl"
if IS_WINDOWS:
YOUTUBEDL_BIN += ".exe"
YTDLP_BIN: str = "yt-dlp"
if IS_WINDOWS:
YTDLP_BIN += ".exe"
FILESIZE_METRICS = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"]
KILO_SIZE = 1024.0
locale_getdefaultlocale = locale.getdefaultlocale
locale_getpreferredencoding = locale.getpreferredencoding
def get_encoding() -> str:
"""Return system encoding, elsese utf-8"""
try:
encoding = locale_getpreferredencoding()
_ = "TEST".encode(encoding)
except locale.Error:
encoding = "utf-8"
return encoding
os_startfile = startfile
def remove_shortcuts(path: str) -> str:
"""Return given path after removing the shortcuts."""
return path.replace("~", str(Path().home()))
def absolute_path(filename: str) -> str:
"""Return absolute path to the given file."""
return str(Path(filename).resolve())
def open_file(file_path: str) -> bool:
"""Open file in file_path using the default OS application.
Returns:
True on success else False.
"""
if not Path(file_path).exists():
return False
os_startfile(file_path)
return True
def encode_tuple(tuple_to_encode: tuple[int, int]) -> str:
"""Turn size tuple into string."""
return f"{tuple_to_encode[0]}/{tuple_to_encode[1]}"
def decode_tuple(encoded_tuple: str) -> tuple[int, int]:
"""Turn tuple string back to tuple."""
s = encoded_tuple.split("/")
return int(s[0]), int(s[1])
def check_path(path: str) -> None:
"""Create path if not exist."""
if not Path(path).exists():
os.makedirs(path)
# noinspection PyUnusedLocal
def get_config_path() -> str:
"""Return user config path.
Note:
Windows = %AppData% + app_name
Linux = ~/.config + app_name
"""
ytdlg_path: str = ""
if os.name == "nt":
ytdlg_path = os.getenv("APPDATA", "")
else:
ytdlg_path = str(Path().home() / Path(".config"))
return str(Path(ytdlg_path) / Path(__appname__.lower()))
# noinspection PyUnusedLocal
def shutdown_sys(password=None) -> bool:
"""Shuts down the system.
Returns True if no errors occur else False.
Args:
password (string): SUDO password for linux.
Note:
On Linux you need to provide sudo password if you don't
have elevated privileges.
"""
info = None
cmd = []
encoding: str = get_encoding()
kwargs = dict(
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
encoding=encoding,
creationflags=0,
)
if os.name == "nt":
cmd = ["shutdown", "/s", "/t", "1"]
# Hide subprocess window
info = subprocess.STARTUPINFO()
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess.SW_HIDE
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
else:
kwargs["start_new_session"] = True
if password:
password = "%s\n" % password
cmd = ["sudo", "-S", "/sbin/shutdown", "-h", "now"]
else:
cmd = ["/sbin/shutdown", "-h", "now"]
shutdown_proc = subprocess.Popen(
cmd, startupinfo=info, **kwargs
) # type: ignore[call-overload]
output = shutdown_proc.communicate(password)[1]
return not output or output == "Password:"
def get_time(seconds: float) -> dict[str, int]:
"""Convert given seconds to days, hours, minutes and seconds.
Args:
seconds (float): Time in seconds.
Returns:
Dictionary that contains the corresponding days, hours, minutes
and seconds of the given seconds.
"""
dtime = dict(seconds=0, minutes=0, hours=0, days=0)
dtime["days"] = int(seconds / 86400)
dtime["hours"] = int(seconds % 86400 / 3600)
dtime["minutes"] = int(seconds % 86400 % 3600 / 60)
dtime["seconds"] = int(seconds % 86400 % 3600 % 60)
return dtime
# noinspection PyPep8Naming
def get_locale_file() -> str | None:
"""Search for yt_dlg locale file.
Returns:
The path to yt_dlg locale file if exists else None.
Note:
Paths that get_locale_file() func searches.
__main__ dir, library dir
"""
SEARCH_DIRS = get_search_dirs("locale")
for directory in SEARCH_DIRS:
if directory.is_dir():
return str(directory)
return None
# noinspection PyPep8Naming
def get_icon_file() -> str | None:
"""Search for yt_dlg app icon.
Returns:
The path to yt_dlg icon file if exists, else returns None.
"""
pixmaps_dir = get_pixmaps_dir()
if pixmaps_dir:
ICON_NAME = "youtube-dl-gui.png"
icon_file = Path(pixmaps_dir) / Path(ICON_NAME)
if icon_file.exists():
return str(icon_file)
return None
# noinspection PyPep8Naming
def get_pixmaps_dir() -> str | None:
"""Return absolute path to the pixmaps icons folder.
Note:
Paths we search: __main__ dir, library dir
"""
SEARCH_DIRS = get_search_dirs("data")
for directory in SEARCH_DIRS:
pixmaps_dir = directory / Path("pixmaps")
if pixmaps_dir.is_dir():
return str(pixmaps_dir)
return None
def to_bytes(string: str) -> float:
"""Convert given youtube-dl size string to bytes."""
value = 0.0
index = 0
for index, metric in enumerate(reversed(FILESIZE_METRICS)):
if metric in string:
value = float(string.split(metric)[0])
break
exponent = index * (-1) + (len(FILESIZE_METRICS) - 1)
return round(value * (KILO_SIZE ** exponent), 2)
def format_bytes(bytes_: float) -> str:
"""Format bytes to youtube-dl size output strings."""
exponent = 0 if bytes_ == 0.0 else int(math.log(bytes_, KILO_SIZE))
suffix = FILESIZE_METRICS[exponent]
output_value = bytes_ / (KILO_SIZE ** exponent)
return f"{output_value:.2f}{suffix}"
def build_command(
options_list: list[str], url: str, cli_backend: str = YOUTUBEDL_BIN
) -> str:
"""Build the CLI Backend command line string."""
def escape(option: str) -> str:
"""Wrap option with double quotes if it contains special symbols."""
special_symbols: list[str] = [" ", "(", ")"]
for symbol in special_symbols:
if symbol in option:
return f'"{option}"'
return option
# If option has special symbols wrap it with double quotes
# Probably not the best solution since if the option already contains
# double quotes it will be a mess, see issue #173
options: list[str] = [escape(option) for option in options_list]
# Always wrap the url with double quotes
url = f'"{url}"'
return " ".join([cli_backend] + options + [url])
def get_default_lang() -> str:
"""Get default language using the 'locale' module."""
default_lang, _ = locale_getdefaultlocale()
return default_lang or "en_US"
def get_key(string: str, dictionary: dict[str, str], default: str = "") -> str:
"""Get key from a value in Dictionary. Return default if key doesn't exist"""
for key, value in dictionary.items():
if value == string:
default = key
return default
return default
| [
2,
2099,
25,
8856,
58,
44374,
60,
198,
37811,
20760,
12,
25404,
70,
8265,
326,
4909,
7736,
5499,
13,
198,
198,
29021,
25,
628,
220,
220,
220,
7013,
51,
10526,
1961,
43,
62,
33,
1268,
357,
8841,
2599,
27431,
12,
25404,
13934,
29472,
... | 2.468236 | 3,101 |
"""Permit List module"""
import json
import falcon
import jsend
import sentry_sdk
from screendoor_sdk.screendoor import Screendoor
class PermitList():
"""Permit List class"""
scrndr = None
scrndr_proj_id = None
logger_name = ''
referred_label_map = {
'MOD - Referred' : "Mayor's Office of Disability",
'Planning - Referred' : "Planning Department",
'Fire - Referred' : "Fire Department",
'DPH - Referred' : "Department of Public Health",
'Police - Referred' : "Police Department",
'Environment - Referred' : "Department of the Environment"
}
status_map = {
'Submitted' : 'Submitted',
'Processing' : 'Processing',
'On Hold' : 'On Hold',
'Approved' : 'Approved',
'Build-out' : 'Under Construction'
}
activity_map = {
'retail' : {'text': 'retailer (medical and adult use)',
'value': 'retailer (medical and adult use)'},
'delivery' : {'text': 'delivery only retailer (medical and adult use)',
'value': 'delivery only retail (medical and adult use)'},
'mcd' : {'text': 'medicinal cannabis retailer (medical only)',
'value': 'medical retailer (medical only)'}
}
def init_screendoor(self, key, version, host, project_id):
"""initialize screendoor"""
self.scrndr = Screendoor(key, version, host)
self.scrndr_proj_id = project_id
def get_permit_list(self, permit_type):
"""return list of permits"""
self.logger_name += '.get_permit_list.'+permit_type
params = {'per_page': 100, 'page' : 1}
# pylint: disable=line-too-long
params['advanced_search'] = '%5B%7B"name"%3A"form"%2C"placeholder"%3Anull%2C"method"%3A"is"%2C"value"%3A5804%7D%2C%7B"name"%3A"rfdd8a5g7g"%2C"placeholder"%3A"answer_to"%2C"method"%3A"is_any"%2C"value"%3A%5B"retailer+(medical+and+adult+use)"%2C"medical+retailer+(medical+only)"%2C"delivery+only+retail+(medical+and+adult+use)"%5D%7D%5D'
sd_responses = self.scrndr.get_project_responses(self.scrndr_proj_id, params, 500)
sd_responses_context = sd_responses
if isinstance(sd_responses, list):
sd_responses_context = {
'length': len(sd_responses),
'data': list(map(lambda x: x.get('sequential_id', ''), sd_responses))}
with sentry_sdk.configure_scope() as scope:
scope.set_tag('logger', self.logger_name)
scope.set_extra('get_permit_list.sd_responses', sd_responses_context)
return self.get_list_transform(sd_responses)
def get_list_transform(self, sd_responses):
"""return a transformed list from screendoor reponses """
permit_list = False
responses_missing = []
sd_fields = {
'activity' : 'dd8a5g7g',
'app_id' : 'uqqrsogr',
'biz_name' : 't00kheyd',
'dba_name' : '60w4ep9y',
'addr' : 'kbqz4189',
'parcel' : 'kvrgbqrl'
}
if isinstance(sd_responses, list):
permit_list = []
for resp in sd_responses:
if (resp.get('responses', False)
and resp['responses'].get(sd_fields['activity'], False)
and (resp['responses'].get(sd_fields['biz_name'], False)
or resp['responses'].get(sd_fields['dba_name'], False))
and (resp.get('status', '') in self.status_map.keys())
):
resp_status = self.status_map[resp.get('status')].lower()
resp_referred = self.get_referred_departments(resp.get('labels'))
item = {
'application_id':'',
'business_name':'',
'dba_name':'',
'address':'',
'parcel':'',
'status':resp_status,
'referred':", ".join(resp_referred)
}
data = resp['responses']
item['application_id'] = str(data.get(sd_fields['app_id']) or '')
if not data.get(sd_fields['app_id']):
item['application_id'] = 'P-' + str(resp['id'])
item['business_name'] = str(data.get(sd_fields['biz_name']) or '')
item['dba_name'] = str(data.get(sd_fields['dba_name']) or item['business_name'])
item['parcel'] = data.get(sd_fields['parcel'], '')
if data.get(sd_fields['addr']) and data.get(sd_fields['addr']).get('street'):
addr = data.get(sd_fields['addr'])
item['address'] = str(addr.get('street') or '')
item['address'] += ', '+str(addr.get('city') or '')
item['address'] += ', '+str(addr.get('state') or '')
item['address'] += ' '+str(addr.get('zipcode') or '')
item['address'] = item['address'].strip(' ,')
if data[sd_fields['activity']] and data[sd_fields['activity']]['checked']:
for applied_permit_type in data[sd_fields['activity']]['checked']:
item[applied_permit_type.lower()] = resp_status
permit_list.append(item)
else:
responses_missing.append(
{'id':resp['id'], 'sequential_id':resp['sequential_id']}
)
with sentry_sdk.configure_scope() as scope:
scope.set_extra('get_list_transform.permit_list_len', len(permit_list))
if responses_missing:
scope.set_extra('get_list_transform.responses_missing', responses_missing)
return permit_list
def get_legacy_list_transform(self, permit_list):
""" return permit list in legacy format """
legacy_permit_list = {}
for item in permit_list:
new_item = {
'application_id':item['application_id'],
'dba_name':item['dba_name'],
'address':item['address'],
'parcel':item['parcel'],
'activities':'',
'referring_dept':item['referred'],
'status': item['status'].title()
}
key = (new_item['dba_name'] + ' ' + new_item['application_id']).strip().upper()
acts = []
if item.get(self.activity_map['retail']['value']):
acts.append(self.activity_map['retail']['text'])
if item.get(self.activity_map['delivery']['value']):
acts.append(self.activity_map['delivery']['text'])
if item.get(self.activity_map['mcd']['value']):
acts.append(self.activity_map['mcd']['text'])
new_item['activities'] = ", ".join(acts)
#skip if activity only contains delivery only
if new_item['activities'] != self.activity_map['delivery']['text']:
legacy_permit_list[key] = new_item
return legacy_permit_list
def get_referred_departments(self, labels):
""" return list of referred to departments """
referred_to = []
for label in labels:
if label in list(self.referred_label_map.keys()):
referred_to.append(self.referred_label_map.get(label))
return referred_to
def on_get(self, _req, resp, permit_type):
"""on GET request
return list of permits
"""
msg = False
if permit_type in ('retail', 'retail_legacy'):
permit_list = self.get_permit_list(permit_type)
permit_list.sort(key=lambda v:
((v.get('dba_name') if v.get('dba_name')
else v.get('business_name', ''))
+' '+v.get('application_id', '')).upper())
if isinstance(permit_list, list):
if permit_type == 'retail_legacy':
data = self.get_legacy_list_transform(permit_list)
else:
data = {'list': permit_list}
data_json = jsend.success(data)
msg = 'success ('+str(len(permit_list))+')'
else:
pass
if msg is not False:
sentry_sdk.capture_message(msg, 'info')
resp.body = json.dumps(data_json)
resp.status = falcon.HTTP_200
else:
msg = 'ERROR'
sentry_sdk.capture_message(msg, 'error')
resp.body = json.dumps(jsend.error(msg))
resp.status = falcon.HTTP_400
| [
37811,
5990,
2781,
7343,
8265,
37811,
198,
11748,
33918,
198,
11748,
24215,
1102,
198,
11748,
474,
21280,
198,
11748,
1908,
563,
62,
21282,
74,
198,
6738,
629,
260,
437,
2675,
62,
21282,
74,
13,
1416,
260,
437,
2675,
1330,
1446,
260,
... | 1.932584 | 4,539 |
# Copyright 2020 Mathew Odden <mathewrodden@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa E501
"""IBM Cloud CRN types and functions.
Parsing a CRN from a (possibly urlencoded) string::
>>> import redstone.crn
>>> mycrn = redstone.crn.loads("crn%3Av1%3Astaging%3Apublic%3Aexampleservice%3Aglobal%3Aa%2Fe5d5b304e0f3469f9145bed817f2efe1%3A6fc68009-12f6-4f9e-be69-02cb3b7e0a8d%3A%3A")
>>> mycrn
redstone.crn.CRN(prefix='crn', version='v1', cname='staging', ctype='public', service_name='exampleservice', location='global', scope='a/e5d5b304e0f3469f9145bed817f2efe1', service_instance='6fc68009-12f6-4f9e-be69-02cb3b7e0a8d', resource_type='', resource='')
>>> str(mycrn)
'crn:v1:staging:public:exampleservice:global:a/e5d5b304e0f3469f9145bed817f2efe1:6fc68009-12f6-4f9e-be69-02cb3b7e0a8d::'
"""
import sys
import urllib.parse
SEGMENTS = [
"prefix",
"version",
"cname",
"ctype",
"service_name",
"location",
"scope",
"service_instance",
"resource_type",
"resource",
]
if __name__ == "__main__":
print(loads(sys.argv[1]))
| [
2,
15069,
12131,
6550,
6391,
440,
4742,
1279,
6759,
6391,
305,
4742,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
239... | 2.470229 | 655 |
from .helpers import Importer
if __name__ == '__main__':
run()
| [
6738,
764,
16794,
364,
1330,
1846,
26634,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1057,
3419,
198
] | 2.592593 | 27 |
bl_info = {
"name": "Torque Terrain (TER) format",
"author": "port",
"version": (0, 0, 1),
"blender": (2, 76, 0),
"location": "File > Import > Torque Terrain",
"description": "Import Torque Terrain (TER) files",
"warning": "",
"support": "COMMUNITY",
"category": "Import-Export"
}
if "bpy" in locals():
import importlib
if "import_ter" in locals():
importlib.reload(import_ter)
import os
import bpy
from . import import_ter
from bpy.props import (
StringProperty,
BoolProperty,
CollectionProperty,
EnumProperty,
FloatProperty,
)
from bpy_extras.io_utils import (
ImportHelper,
ExportHelper,
orientation_helper_factory,
axis_conversion,
)
from bpy.types import (
Operator,
OperatorFileListElement,
)
if __name__ == "__main__":
register()
| [
2436,
62,
10951,
796,
1391,
198,
220,
220,
220,
366,
3672,
1298,
366,
15884,
4188,
3813,
3201,
357,
5781,
8,
5794,
1600,
198,
220,
220,
220,
366,
9800,
1298,
366,
634,
1600,
198,
220,
220,
220,
366,
9641,
1298,
357,
15,
11,
657,
1... | 2.255583 | 403 |
#!/usr/bin/env python
"""Define Class `FastaSplitter` which splits a fasta file into
smaller files each containing `reads_per_split` reads."""
import os
import os.path as op
import sys
from pbcore.io import FastaWriter
from pbtranscript.Utils import mkdir
from pbtranscript.io.ContigSetReaderWrapper import ContigSetReaderWrapper
__all__ = ["FastaSplitter"]
class FastaSplitter(object):
"""An object of `FastaSplitter` splits a fasta file into
smaller chunks with a given prefix."""
def _out_fn(self, split_index):
"""Return name of the `split_index`-th splitted file."""
if split_index > 999:
raise ValueError("Too many splitted files to generate: number " +
"of splitted files exceed 1000.")
name = "{prefix}_{idx:03d}.fasta".format(prefix=self.out_prefix,
idx=split_index)
return op.join(self.out_dir, name)
def split(self, reads_in_first_split=None):
"""Split `input_fasta` into smaller files each containing
`reads_per_split` reads. Return splitted fasta."""
split_index = 0
self.out_fns = []
writer = FastaWriter(self._out_fn(split_index))
self.out_fns.append(self._out_fn(split_index))
if reads_in_first_split is None:
reads_in_first_split = self.reads_per_split
with ContigSetReaderWrapper(self.input_fasta) as reader:
for ridx, r in enumerate(reader):
if ((split_index == 0 and ridx == reads_in_first_split) or
(split_index > 0 and ridx % self.reads_per_split == 0)) \
and ridx != 0:
split_index += 1
writer.close()
writer = FastaWriter(self._out_fn(split_index))
self.out_fns.append(self._out_fn(split_index))
writer.writeRecord(r.name, r.sequence[:])
writer.close()
return list(self.out_fns)
def rmOutFNs(self):
"""Remove splitted files."""
for f in self.out_fns:
os.remove(f)
self.out_fns = []
def splitFasta(input_fasta, reads_per_split, out_dir, out_prefix, reads_in_first_split=None):
"""
Split input_fasta into small fasta files each containing at most
reads_per_split reads. All splitted fasta files will be placed under
out_dir with out_prefix. Return paths to splitted files in a list.
"""
obj = FastaSplitter(input_fasta=input_fasta,
reads_per_split=reads_per_split,
out_dir=out_dir, out_prefix=out_prefix)
return obj.split(reads_in_first_split=reads_in_first_split)
def get_args():
"""Get arguments."""
import argparse
parser = argparse.ArgumentParser(
description="Split a fasta file into smaller chunks.")
parser.add_argument("input_fasta",
type=str,
help="Input fasta to be splitted.")
parser.add_argument("reads_per_split",
type=int,
help="Reads per split.")
parser.add_argument("out_dir",
type=str,
help="Output directory.")
parser.add_argument("out_prefix",
type=str,
help="Output files prefix.")
this_args = parser.parse_args()
return this_args
def main():
"""Main function, split a fasta into smaller chunks."""
import logging
from pbtranscript.__init__ import get_version
log = logging.getLogger(__name__)
args = get_args()
from pbtranscript.Utils import setup_log
setup_log(alog=log, level=logging.DEBUG)
log.info("Running {f} v{v}.".format(f=op.basename(__file__),
v=get_version()))
splitFasta(input_fasta=args.input_fasta,
reads_per_split=args.reads_per_split,
out_dir=args.out_dir,
out_prefix=args.out_prefix)
if __name__ == "__main__":
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
7469,
500,
5016,
4600,
22968,
64,
26568,
1967,
63,
543,
30778,
257,
3049,
64,
2393,
656,
198,
17470,
263,
3696,
1123,
7268,
4600,
40779,
62,
525,
62,
35312,
63,
9743,
526,
15931... | 2.143382 | 1,904 |
import torch
import numpy as np
import pickle
import matplotlib.pyplot as plt
n_ring = 16
hbar = 1.0
k = 1.0
kbt = 0.1
beta = 10.0
with open('tmp.pkl', 'rb') as f:
pos, pot, kin = pickle.load(f)
pot = torch.stack(pot).mean()
kin = torch.stack(kin).mean()
print(pot + kin)
pos = torch.stack(pos).flatten()
hist, bin_ = np.histogram(pos, bins=101, range=(-10, 10))
x = 0.5 * (bin_[1:] + bin_[:-1])
y = np.exp(- 0.5 * x * x / kbt)
y = y / y.sum() * hist.sum()
z = gound(x, hbar, torch.tensor(1.0), k) ** 2
z = z / z.sum() * hist.sum()
print('END')
plt.plot(x, hist)
plt.plot(x, y)
plt.plot(x, z)
plt.show()
| [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2298,
293,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
77,
62,
1806,
796,
1467,
198,
71,
5657,
796,
352,
13,
15,
198,
74,
796,
352,
13,
15,
... | 2.124567 | 289 |
class NoCurrentVersionFound(KeyError):
"""
No version node of the a particular parent node could be found
"""
pass
class VersionDoesNotBelongToNode(AssertionError):
"""
The version that is trying to be attached does not belong to the parent node
"""
pass
| [
198,
198,
4871,
1400,
11297,
14815,
21077,
7,
9218,
12331,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1400,
2196,
10139,
286,
262,
257,
1948,
2560,
10139,
714,
307,
1043,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1208,
... | 3.11828 | 93 |
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
from pathlib import Path
import re
filename = Path('courantnumber_max-rfile.out')
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.serif'] = 'Computer Modern'
mpl.rcParams['font.size'] = 18
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['text.usetex'] = True
def movingstats(series, interval):
"""Calculating stats of a moving range"""
movingaverage = []
movingstd = []
for i in range(1,len(series)-interval):
movingaverage.append(series[i:i+interval].mean())
movingstd.append(series[i:i+interval].std())
return (movingaverage, movingstd)
def extendingstats(series, startindex):
"""Calculates the stats of a range extending from one point"""
extendaverage, extendstd = [], []
for n in range(startindex+1, len(series)+1):
extendaverage.append(series[startindex:n].mean())
extendstd.append(series[startindex:n].std())
return (extendaverage, extendstd)
headernames = get_headers(filename, 3, r'(?:" ")|["\)\(]', ['\n'])
data = pd.read_table(filename, sep=' ', skiprows=3, names=headernames)
data.head() | [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
302,
198,
198,
34345,
796,
10644,
10786,
43220,... | 2.657534 | 438 |
# -*- coding: utf-8 -*-
from django import forms
from django.forms import inlineformset_factory
from django.utils.translation import ugettext_lazy as _
from djangosige.apps.cadastro.models import Pessoa, Endereco, Telefone, Email, Site, Banco, Documento
EnderecoFormSet = inlineformset_factory(
Pessoa, Endereco, form=EnderecoForm, extra=1, can_delete=True)
TelefoneFormSet = inlineformset_factory(
Pessoa, Telefone, form=TelefoneForm, extra=1, can_delete=True)
EmailFormSet = inlineformset_factory(
Pessoa, Email, form=EmailForm, extra=1, can_delete=True)
SiteFormSet = inlineformset_factory(
Pessoa, Site, form=SiteForm, extra=1, can_delete=True)
BancoFormSet = inlineformset_factory(
Pessoa, Banco, form=BancoForm, extra=1, can_delete=True)
DocumentoFormSet = inlineformset_factory(
Pessoa, Documento, form=DocumentoForm, extra=1, can_delete=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
6738,
42625,
14208,
1330,
5107,
201,
198,
6738,
42625,
14208,
13,
23914,
1330,
26098,
687,
2617,
62,
69,
9548,
201,
198,
6738,
42625,
14208,
13,
26791,
13... | 2.505495 | 364 |
# common.py
TRANSLATE = [('->', '$\\rightarrow$'),
('...', '\\dots '),
('[', '$[$'),
(']', '$]$'),
('_', '\\_')]
| [
2,
2219,
13,
9078,
198,
198,
5446,
1565,
8634,
6158,
796,
685,
10786,
3784,
3256,
705,
3,
6852,
3506,
6018,
3,
33809,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
19203,
986,
3256,
705,
6852,
67,
1747,
705,
828... | 1.541284 | 109 |
import warnings
import numpy as np
import pandas as pd
warnings.filterwarnings("ignore")
url = "https://data.rivm.nl/covid-19/COVID-19_aantallen_gemeente_per_dag.csv"
def get_data(url: str = url) -> pd.DataFrame:
"""
Get data from url
url: url to csv
"""
return pd.read_csv(url, sep=";", parse_dates=["Date_of_publication"])[
["Date_of_publication", "Total_reported"]
]
def new_cases(df: pd.DataFrame) -> pd.DataFrame:
"""
Processes data to get new case counts
df: pandas DataFrame
"""
# Only get total cases based on date of publication
df = df.groupby(df["Date_of_publication"].dt.date).sum().reset_index()
# Rename columns
df.columns = ["date", "cases"]
# Set date as index
df = df.set_index("date")
return df
def smooth_cases(df: pd.DataFrame, window: int = 7, cutoff: int = 25) -> pd.DataFrame:
"""
Smooth new case data
df: pandas DataFrame
window: rolling windown used for smoothing
cuttoff: get start when new cases > cutoff
"""
# Calculate smoohted new cases
smoothed = (
df.rolling(7, win_type="gaussian", min_periods=1, center=True)
.mean(std=2)
.round()
)
# Get start index when new cases > cutoff
idx_start = np.searchsorted(smoothed.values.flatten(), cutoff)
# Filter smoothed and original based on cutoff
smoothed = smoothed.iloc[idx_start:]
original = df.loc[smoothed.index]
return original, smoothed
| [
11748,
14601,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
40539,
654,
13,
24455,
40539,
654,
7203,
46430,
4943,
198,
198,
6371,
796,
366,
5450,
1378,
7890,
13,
380,
14761,
13,
21283,
14,
66... | 2.518519 | 594 |
from django.conf import settings
from DashboardManagement.common import helper
from Referral import models as refer_models
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
16189,
3526,
48032,
13,
11321,
1330,
31904,
198,
6738,
33973,
1373,
1330,
4981,
355,
3522,
62,
27530,
628
] | 4.464286 | 28 |
""" from https://github.com/keithito/tacotron """
print(1)
from text import cmudict, pinyin
_pad = "_"
_punctuation = "!'(),.:;? "
_special = "-"
_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
_silences = ["@sp", "@spn", "@sil"]
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ["@" + s for s in cmudict.valid_symbols]
_pinyin = ["@" + s for s in pinyin.valid_symbols]#===========这个地方要自己添加.
#==========加上自己字典的特殊pinyin符号.
with open('madarin_lexicon.txt') as f:
tmp=f.readlines()
pass
print(1)
# Export all symbols:
symbols = (
[_pad]
+ list(_special)
+ list(_punctuation)
+ list(_letters)
+ _arpabet
+ _pinyin
+ _silences
)
print("打印全的不symbols",symbols)
with open("当前使用的symbols是",'w')as f :
f.write(str(symbols))
#=============symbols要自己手动加入自己需要的汉语拼音才行!!!!!!!! | [
37811,
422,
3740,
1378,
12567,
13,
785,
14,
365,
342,
10094,
14,
83,
330,
313,
1313,
37227,
198,
4798,
7,
16,
8,
628,
198,
6738,
2420,
1330,
12067,
463,
713,
11,
279,
3541,
259,
198,
198,
62,
15636,
796,
45434,
1,
198,
62,
79,
1... | 1.928571 | 462 |
import cog
from pathlib import Path
import torch
import pixray
import yaml
import pathlib
import os
import yaml
from cogrun import create_temporary_copy
| [
11748,
43072,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
28034,
198,
11748,
279,
844,
2433,
198,
11748,
331,
43695,
198,
11748,
3108,
8019,
198,
11748,
28686,
198,
11748,
331,
43695,
198,
198,
6738,
43072,
5143,
1330,
2251,
62,
11498... | 3.5 | 44 |
# -*- coding: utf8 -*-
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from pylero.base_polarion import BasePolarion
class BuildTestResults(BasePolarion):
"""Object to handle the Polarion WSDL tns2:BuildTestResults class
Attributes:
error_count (int)
failure_count (int)
skipped_count (int)
test_count (int)
"""
_cls_suds_map = {"error_count": "errorCount",
"failure_count": "failureCount",
"skipped_count": "skippedCount",
"test_count": "testCount",
"uri": "_uri",
"_unresolved": "_unresolved"}
_obj_client = "builder_client"
_obj_struct = "tns2:BuildTestResults"
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
12972,
1754,
78,
13... | 2.178273 | 359 |
# Generated by Django 2.1.2 on 2018-12-09 05:48
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
17,
319,
2864,
12,
1065,
12,
2931,
8870,
25,
2780,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import numpy as np
import math
iris = [1.43, -0.4, 0.23]
softmax(iris, 3)
# array([0.684, 0.11 , 0.206])
| [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
220,
198,
198,
29616,
796,
685,
16,
13,
3559,
11,
532,
15,
13,
19,
11,
657,
13,
1954,
60,
198,
198,
4215,
9806,
7,
29616,
11,
513,
8,
220,
198,
2,
7177,
26933,
15,
13,
41580,
11,... | 2.018519 | 54 |
import torch
from elf.segmentation.features import compute_rag
| [
11748,
28034,
198,
6738,
23878,
13,
325,
5154,
341,
13,
40890,
1330,
24061,
62,
22562,
628,
628
] | 3.882353 | 17 |
#-*- coding: utf-8 -*-
import os.path as p
import os
import cv2 as cv
import numpy as np
if __name__ == '__main__':
global startNum, name
load_images()
print('[increasing number]_[x]_[y]_[width]_[height]_[name].jpg will be annoated in file name!')
name = input('Name?')
startNum = int(input('Number? (1 is recommended)'))
print('===START===')
for idx, image in enumerate(imageList):
print(idx+'/'+len(imageList), 'target: ', image)
show_image(image, idx+startNum)
print('===END===')
| [
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
13,
6978,
355,
279,
198,
11748,
28686,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
11748,
299,
32152,
355,
45941,
198,
220,
220,
198,
361,
11593,
3672,
83... | 2.536946 | 203 |
from enum import Enum
from typing import NamedTuple
| [
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
34441,
51,
29291,
628,
198
] | 3.857143 | 14 |
from typing import Callable, List, ClassVar, Sequence
import attr
from event_manager.event import Event
from event_manager.exchanges import ORDER_EXCHANGE
from .order_line import OrderLine
@attr.s
| [
6738,
19720,
1330,
4889,
540,
11,
7343,
11,
5016,
19852,
11,
45835,
198,
11748,
708,
81,
198,
6738,
1785,
62,
37153,
13,
15596,
1330,
8558,
198,
198,
6738,
1785,
62,
37153,
13,
1069,
36653,
1330,
38678,
62,
6369,
3398,
27746,
198,
673... | 3.571429 | 56 |
#!/usr/bin/env python
#
# Copyright 2018 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create the asset."""
import argparse
import common
import os
import shutil
import subprocess
import utils
# This is basically all the deps of g++-multilib-mips64el-linux-gnuabi64 that
# are not already installed on the bots.
#
# We could try to also include packages that *are* already installed on the bots
# as well, but that would be quite a bit, and would probably entail more hacky
# fixes like below.
#
# There is probably a way to generate this list from apt, but it's not as
# straightforward as it should be.
PKGS = [
'binutils-mips64el-linux-gnuabi64',
'cpp-8-mips64el-linux-gnuabi64',
'g++-8-mips64el-linux-gnuabi64',
'gcc-8-cross-base',
'gcc-8-mips64el-linux-gnuabi64',
'gcc-8-mips64el-linux-gnuabi64-base',
'libatomic1-mips64el-cross',
'libc6-dev-mips64el-cross',
'libc6-mips64el-cross',
'libgcc-8-dev-mips64el-cross',
'libgcc1-mips64el-cross',
'libgomp1-mips64el-cross',
'libisl19',
'libmpfr6', # This is new in buster, so build machines don't have it yet.
'libstdc++-8-dev-mips64el-cross',
'libstdc++6-mips64el-cross',
'linux-libc-dev-mips64el-cross',
]
def create_asset(target_dir):
"""Create the asset."""
# This is all a bit hacky. Rather than installing to a chroot, we just extract
# all the packages to the target dir, then fix things up so that it can be
# used in our recipes.
with utils.tmp_dir():
# Download required Debian packages.
subprocess.check_call(['apt-get', 'download'] + PKGS)
for f in os.listdir('.'):
subprocess.check_call(['dpkg-deb', '--extract', f, target_dir])
parent_dir = os.path.join(target_dir, 'usr')
# Remove unnecessary files that cause problems with zipping (due to dangling
# symlinks).
os.remove(os.path.join(parent_dir,
'lib/gcc-cross/mips64el-linux-gnuabi64/8/libcc1.so'))
shutil.rmtree(os.path.join(parent_dir, 'share'))
# Remove usr/ prefix.
for d in os.listdir(parent_dir):
os.rename(os.path.join(parent_dir, d), os.path.join(target_dir, d))
os.rmdir(parent_dir)
# Remove absolute paths in GNU ld scripts.
lib_dir = os.path.join(target_dir, 'mips64el-linux-gnuabi64/lib')
ld_script_token = 'OUTPUT_FORMAT(elf64-tradlittlemips)'
ld_script_files = subprocess.check_output(
['grep', '--recursive', '--files-with-matches',
'--binary-files=without-match', '--fixed-strings', ld_script_token,
lib_dir]).split()
abs_path = '/usr/mips64el-linux-gnuabi64/lib/'
for f in ld_script_files:
with open(f) as script:
contents = script.read()
contents = contents.replace(abs_path, '')
with open(f, 'w') as script:
script.write(contents)
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
2864,
3012,
3457,
13,
198,
2,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
... | 2.524144 | 1,139 |
from collections import OrderedDict
import torch
import torch.optim as optim
import torch.distributed as dist
from .vsr_model import VSRModel
from .networks import define_generator, define_discriminator
from .networks.vgg_nets import VGGFeatureExtractor
from .optim import define_criterion, define_lr_schedule
from utils import base_utils, net_utils, dist_utils
class VSRGANModel(VSRModel):
""" A model wrapper for subjective video super-resolution
"""
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
11748,
28034,
13,
17080,
6169,
355,
1233,
198,
198,
6738,
764,
14259,
81,
62,
19849,
1330,
569,
12562,
17633,
198,
6738,
764,... | 3.470149 | 134 |
import io
import base64
import requests
from typing import Dict, List
from .utils import retry, return_curl_or_response
class ViImageClient:
"""
Search and Encoding of Images
"""
@return_curl_or_response('json')
@retry()
def search_image(
self,
collection_name: str,
image,
fields: List,
metric: str = "cosine",
min_score=None,
page: int = 1,
page_size: int = 10,
include_vector:bool=False,
include_count:bool=True,
asc:bool=False,
return_curl: bool=False,
**kwargs
):
"""
Search an image field with image using Vector Search
Vector similarity search with an image directly.
_note: image has to be stored somewhere and be provided as image_url, a url that stores the image_
For example: an image_url represents an image of a celebrity::
"https://www.celebrity_images.com/brad_pitt.png"
-> <Encode the image to vector> ->
image vector: [0.794617772102356, 0.3581121861934662, 0.21113917231559753, 0.24878688156604767, 0.9741804003715515 ...]
-> <Vector Search> ->
Search Results: {...}
Args:
image_url:
The image url of an image to encode into a vector
collection_name:
Name of Collection
search_fields:
Vector fields to search through
approx:
Used for approximate search
sum_fields:
Whether to sum the multiple vectors similarity search score as 1 or seperate
page_size:
Size of each page of results
page:
Page of the results
metric:
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
min_score:
Minimum score for similarity metric
include_vector:
Include vectors in the search results
include_count:
Include count in the search results
hundred_scale:
Whether to scale up the metric by 100
asc:
Whether to sort the score by ascending order (default is false, for getting most similar results)
"""
if type(image) == str:
if "http" in image:
params = {
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"image_url": image,
"search_fields": fields,
"metric": metric,
"min_score": min_score,
"page": page,
"page_size": page_size,
"include_vector": include_vector,
"include_count": include_count,
"asc": asc
}
params.update(kwargs)
return requests.post(
url="{}/collection/search_with_image".format(self.url),
json=params
)
elif type(image) == bytes:
params = {
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"image": image.decode(),
"search_fields": fields,
"metric": metric,
"min_score": min_score,
"page": page,
"page_size": page_size,
"include_vector": include_vector,
"include_count": include_count,
"asc": asc
}
params.update(kwargs)
return requests.post(
url="{}/collection/search_with_image_upload".format(self.url),
json=params
)
@retry()
def search_image_by_upload(
self,
collection_name: str,
image,
fields: List,
metric: str = "cosine",
min_score=None,
page: int = 1,
page_size: int = 10,
include_vector=False,
include_count=True,
asc=False,
return_curl: bool=False,
**kwargs
):
"""
Search an image field with uploaded image using Vector Search
Vector similarity search with an uploaded image directly.
_note: image has to be sent as a base64 encoded string_
Args:
collection_name:
Name of Collection
search_fields:
Vector fields to search against
page_size:
Size of each page of results
page:
Page of the results
approx:
Used for approximate search
sum_fields:
Whether to sum the multiple vectors similarity search score as 1 or seperate
metric:
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
min_score:
Minimum score for similarity metric
include_vector:
Include vectors in the search results
include_count:
Include count in the search results
hundred_scale:
Whether to scale up the metric by 100
image:
Image in local file path
asc:
Whether to sort the score by ascending order (default is false, for getting most similar results)
"""
with open(image, "rb") as fd:
contents = fd.read()
return self.search_image(
base64.b64encode(io.BytesIO(contents).read()),
collection_name,
fields,
metric,
page,
page_size,
include_vector,
include_count,
asc,
return_curl=return_curl,
**kwargs
)
@return_curl_or_response('json')
@retry()
def encode_image(self, collection_name: str, image, return_curl: bool=False, **kwargs):
"""
Encode image into a vector
_note: image has to be stored somewhere and be provided as image_url, a url that stores the image_
For example: an image_url represents an image of a celebrity::
"https://www.celebrity_images.com/brad_pitt.png"
-> <Encode the image to vector> ->
image vector: [0.794617772102356, 0.3581121861934662, 0.21113917231559753, 0.24878688156604767, 0.9741804003715515 ...]
Args:
image:
The image url of an image to encode into a vector
collection_name:
Name of Collection
"""
params={
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"image_url": image,
}
params.update(kwargs)
response = requests.get(
url="{}/collection/encode_image".format(self.url),
params=params
)
@return_curl_or_response('json')
@retry()
def encode_image_job(
self, collection_name: str, image_field: str, refresh: bool = False, return_curl: bool=False, **kwargs
):
"""
Encode all images in a field into vectors
Within a collection encode the specified image field in every document into vectors.
_note: image has to be stored somewhere and be provided as image_url, a url that stores the image_
For example, an image_url field "celebrity_image" represents an image of a celebrity::
document 1 image_url field: {"celebrity_image" : "https://www.celebrity_images.com/brad_pitt".png}
document 2 image_url field: {"celebrity_image" : "https://www.celebrity_images.com/brad_pitt.png"}
-> <Encode the images to vectors> ->
document 1 image_url vector: {"celebrity_image_vector_": [0.794617772102356, 0.3581121861934662, 0.21113917231559753, 0.24878688156604767, 0.9741804003715515 ...]}
document 2 image_url vector: {"celebrity_image_vector_": [0.8364648222923279, 0.6280597448348999, 0.8112713694572449, 0.36105549335479736, 0.005313870031386614 ...]}
Args:
image_field:
The image field to encode into vectors
refresh:
Whether to refresh the whole collection and re-encode all to vectors
collection_name:
Name of Collection
"""
params= {
"username": self.username,
"api_key": self.api_key,
"collection_name": collection_name,
"image_field": image_field,
"refresh": refresh,
}
params.update(kwargs)
return requests.get(
url="{}/collection/jobs/encode_image_field".format(self.url),
params=params
)
| [
11748,
33245,
198,
11748,
2779,
2414,
198,
11748,
7007,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
198,
6738,
764,
26791,
1330,
1005,
563,
11,
1441,
62,
66,
6371,
62,
273,
62,
26209,
198,
198,
4871,
16049,
5159,
11792,
25,
198,
220,
... | 2.256447 | 3,529 |
from . import DiscretePGM, PotentialTable, CliqueTreeInference, LoopyBPInference
import networkx as nx
import pandas
import numpy as np
class BayesNet(DiscretePGM):
''' Implementation of a simple Bayesian Network.'''
def __init__(self, digraph, cardinalities=[], var_names=[]):
''' Construct bayesian network from a DiGraph which specifies causal directions of variables
Arguments:
digraph: networkx.DiGraph instance
'''
assert isinstance(digraph, nx.DiGraph)
DiscretePGM.__init__(self, cardinalities, var_names)
self.G = digraph
self.value_map = {}
| [
198,
6738,
764,
1330,
8444,
8374,
6968,
44,
11,
32480,
10962,
11,
1012,
2350,
27660,
818,
4288,
11,
6706,
11081,
20866,
818,
4288,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
19798,
292,
198,
11748,
299,
32152,
355,
45941,
198,
19... | 2.079452 | 365 |
from django.contrib.gis import admin
from adventure import models
from utils.admin_helper import AdminBase
admin.site.register([models.Track, models.PointOfInterest], AdminBase)
admin.site.register([models.Note, models.FeatureType])
| [
6738,
42625,
14208,
13,
3642,
822,
13,
70,
271,
1330,
13169,
198,
198,
6738,
8855,
1330,
4981,
198,
6738,
3384,
4487,
13,
28482,
62,
2978,
525,
1330,
32053,
14881,
198,
198,
28482,
13,
15654,
13,
30238,
26933,
27530,
13,
24802,
11,
49... | 3.522388 | 67 |
""" Joomla Information Gathering """
from common.colors import red, green, bg, G, R, W, Y, G , good , bad , run , info , end , que
import re
import requests
# Find Joomla version and check it on exploit-db | [
37811,
449,
4207,
5031,
6188,
36397,
37227,
198,
6738,
2219,
13,
4033,
669,
1330,
2266,
11,
4077,
11,
275,
70,
11,
402,
11,
371,
11,
370,
11,
575,
11,
402,
837,
922,
837,
2089,
837,
1057,
837,
7508,
837,
886,
837,
8358,
198,
11748... | 3.306452 | 62 |
# Copyright 2020 Rubrik, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import glob
import inspect
import logging
import os
import shutil
import sys
import jinja2
import rubrik_cdm
if __name__ == "__main__":
# Define the logging params
console_output_handler = logging.StreamHandler()
formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] -- %(message)s")
console_output_handler.setFormatter(formatter)
log = logging.getLogger(__name__)
log.addHandler(console_output_handler)
# Uncomment to enable debug logging
# log.setLevel(logging.DEBUG)
# Create template environment
env = jinja2.Environment(
loader=jinja2.PackageLoader('create_docs', 'docs/templates'),
trim_blocks=True,
lstrip_blocks=True
)
build_directory = 'docs/_build'
# Create build directory
try:
os.mkdir(build_directory)
except FileExistsError:
# Empty existing directory
for f in glob.glob('{}/*'.format(build_directory)):
os.remove(f)
# Copy static markdown files from docs/ to the build directory
for f in glob.glob(r'docs/*.md'):
shutil.copy(f, build_directory)
# Get all functions defined in the SDK, both public and internal ones
sdk_functions = get_sdk_functions()
# Generate the function documentation files
for class_fns in sdk_functions.values():
for fn in (class_fns['public'] + class_fns['private']):
generate_function_doc(env, fn[0], fn[1])
# Generate the summary (side navigation) file
generate_summary_doc(env, sdk_functions)
| [
2,
15069,
12131,
6256,
12602,
11,
3457,
13,
198,
2,
198,
2,
220,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
220,
286,
428,
3788,
290,
3917,
10314,
3696,
357,
1169,
366,
25423,
... | 2.96868 | 894 |
from traduki.sqla import initialize
__all__ = (initialize.__name__)
| [
6738,
2083,
11308,
13,
31166,
5031,
1330,
41216,
198,
198,
834,
439,
834,
796,
357,
36733,
1096,
13,
834,
3672,
834,
8,
198
] | 3 | 23 |
import unittest
from even_last import checkio
if __name__ == "__main__": # pragma: no cover
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
772,
62,
12957,
1330,
2198,
952,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
220,
1303,
23864,
2611,
25,
645,
3002,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419... | 2.659091 | 44 |
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import json
import random
class spotify_api:
"""
Class that uses that spotify api to get a random song depending
on the mood of the user.
"""
def __init__(self, emotion):
"""
Constructor initialiazes spotify instance and client ID and key
Arguments:
emotion: gets a string that tells the emotion of the person
"""
self.__clientID = '0ba2ff8c64394b11b98626001a7d9439'
self.__key= 'ee1a06ad1f2c44c7bb5fccb6adbb3672'
self.__redirectURI = 'http://localhost:8888/callback'
client_credentials_manager = SpotifyClientCredentials(client_id=self.__clientID, client_secret=self.__key)
self.__sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
self.__emotion = emotion
def search_emotion(self):
"""
gets a list of playlists based on the emotion that is given
Returns:
list if json objects of top 50 the playlists that were found
"""
result = self.__sp.search(q=self.__emotion, limit= 50, type='playlist')
return result['playlists']['items']
def get_random_playlist(self):
"""
Get a random playlist from the list
Returns:
the name of the playlist as a string
"""
playlist_list = self.search_emotion()
index = random.randrange(0,len(playlist_list))
return (playlist_list[index]['name'])
def search_playlist(self):
"""
searches for top 50 songs based on the playlist
Returns:
a list of json objects that contain the names of the songs from
the given playlist
"""
playlist = self.get_random_playlist()
result = self.__sp.search(q=playlist,limit=50, type='track')
return result['tracks']['items']
def get_random_song(self):
"""
Gets the song and and link from a given object and its Link
Returns:
a list of strings that contains the song name and the link
"""
song_list = self.search_playlist()
index = random.randrange(0,len(song_list))
song_data = []
song_data.append(song_list[index]['name'])
song_data.append(song_list[index]['external_urls']['spotify'])
return song_data
#test delete later
x = spotify_api('happy')
print(x.get_random_song()) | [
11748,
4136,
541,
88,
198,
6738,
4136,
541,
88,
13,
12162,
1071,
17,
1330,
26778,
11792,
34,
445,
14817,
198,
11748,
33918,
198,
11748,
4738,
198,
198,
4871,
4136,
1958,
62,
15042,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
... | 2.350282 | 1,062 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import yaml
import netifaces
from libcloud.compute.base import NodeState
from libcloud.compute.deployment import Deployment
from libcloud.compute.deployment import ScriptDeployment
from libcloud.compute.deployment import SSHKeyDeployment
from libcloud.compute.ssh import SSHClient
from plumbery.exception import PlumberyException
from plumbery.nodes import PlumberyNodes
from plumbery.polisher import PlumberyPolisher
from plumbery.text import PlumberyText
from plumbery.text import PlumberyNodeContext
from plumbery.plogging import plogging
class FileContentDeployment(Deployment):
"""
Installs a file on a target node.
"""
def __init__(self, content, target):
"""
:type content: ``str``
:keyword content: Content of the target file to create
:type target: ``str``
:keyword target: Path to install file on node
"""
self.content = content
self.target = target
def run(self, node, client):
"""
Writes the file.
See also :class:`Deployment.run`
"""
client.put(path=self.target, contents=self.content)
return node
class RebootDeployment(Deployment):
"""
Reboots a node and let cloud-init do the dirty job.
"""
def __init__(self, container):
"""
:param container: the container of this node
:type container: :class:`plumbery.PlumberyInfrastructure`
"""
self.region = container.region
def run(self, node, client):
"""
Reboots the node.
See also :class:`Deployment.run`
"""
repeats = 0
while True:
try:
self.region.reboot_node(node)
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
if 'VM_TOOLS_INVALID_STATUS' in str(feedback):
if repeats < 5:
time.sleep(10)
repeats += 1
continue
plogging.error("- unable to reboot node")
plogging.error(str(feedback))
finally:
return node
class PreparePolisher(PlumberyPolisher):
"""
Bootstraps nodes via ssh
This polisher looks at each node in sequence, and contact selected nodes
via ssh to prepare them. The goal here is to accelerate post-creation
tasks as much as possible.
Bootstrapping steps can consist of multiple tasks:
* push a SSH public key to allow for automated secured communications
* ask for package update
* install docker
* install any pythons script
* install Stackstorm
* configure a Chef client
* register a node to a monitoring dashboard
* ...
To activate this polisher you have to mention it in the fittings plan,
like in the following example::
---
safeMode: False
actions:
- prepare:
key: ~/.ssh/myproject_rsa.pub
---
# Frankfurt in Europe
locationId: EU6
regionId: dd-eu
...
Plumbery will only prepare nodes that have been configured for it. The
example below demonstrates how this can be done for multiple docker
containers::
# some docker resources
- docker:
domain: *vdc1
ethernet: *containers
nodes:
- docker1:
prepare: &docker
- run prepare.update.sh
- run prepare.docker.sh
- docker2:
prepare: *docker
- docker3:
prepare: *docker
In the real life when you have to prepare any appliance, you need to be
close to the stuff and to touch it. This is the same for virtual fittings.
This polisher has the need to communicate directly with target
nodes over the network.
This connectivity can become quite complicated because of the potential mix
of private and public networks, firewalls, etc. To stay safe plumbery
enforces a simple beachheading model, where network connectivity with end
nodes is a no brainer.
This model is based on predefined network addresses for plumbery itself,
as in the snippet below::
---
# Frankfurt in Europe
locationId: EU6
regionId: dd-eu
# network subnets are 10.1.x.y
prepare:
- beachhead: 10.1.3.4
Here nodes at EU6 will be prepared only if the machine that is
executing plumbery has the adress 10.1.3.4. In other cases, plumbery will
state that the location is out of reach.
"""
def upgrade_vmware_tools(self, node):
"""
Upgrade VMware tools on target node
:param node: the node to be polished
:type node: :class:`libcloud.compute.base.Node`
"""
if self.engine.safeMode:
return True
while True:
try:
self.region.ex_update_vm_tools(node=node)
plogging.info("- upgrading vmware tools")
return True
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
if 'Please try again later' in str(feedback):
time.sleep(10)
continue
if 'NO_CHANGE' in str(feedback):
plogging.debug("- vmware tools is already up-to-date")
return True
plogging.warning("- unable to upgrade vmware tools")
plogging.warning(str(feedback))
return False
def _apply_prepares(self, node, steps):
"""
Does the actual job over SSH
:param node: the node to be polished
:type node: :class:`libcloud.compute.base.Node`
:param steps: the various steps of the preparing
:type steps: ``list`` of ``dict``
:return: ``True`` if everything went fine, ``False`` otherwise
:rtype: ``bool``
"""
if node is None or node.state != NodeState.RUNNING:
plogging.warning("- skipped - node is not running")
return False
# select the address to use
if len(node.public_ips) > 0:
target_ip = node.public_ips[0]
elif node.extra['ipv6']:
target_ip = node.extra['ipv6']
else:
target_ip = node.private_ips[0]
# use libcloud to communicate with remote nodes
session = SSHClient(hostname=target_ip,
port=22,
username=self.user,
password=self.secret,
key_files=self.key_files,
timeout=10)
repeats = 0
while True:
try:
session.connect()
break
except Exception as feedback:
repeats += 1
if repeats > 5:
plogging.error("Error: can not connect to '{}'!".format(
target_ip))
plogging.error("- failed to connect")
return False
plogging.debug(str(feedback))
plogging.debug("- connection {} failed, retrying".format(repeats))
time.sleep(10)
continue
while True:
try:
if self.engine.safeMode:
plogging.info("- skipped - no ssh interaction in safe mode")
else:
for step in steps:
plogging.info('- {}'.format(step['description']))
step['genius'].run(node, session)
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
plogging.error("Error: unable to prepare '{}' at '{}'!".format(
node.name, target_ip))
plogging.error(str(feedback))
plogging.error("- failed")
result = False
else:
result = True
break
try:
session.close()
except:
pass
return result
def _get_prepares(self, node, settings, container):
"""
Defines the set of actions to be done on a node
:param node: the node to be polished
:type node: :class:`libcloud.compute.base.Node`
:param settings: the fittings plan for this node
:type settings: ``dict``
:param container: the container of this node
:type container: :class:`plumbery.PlumberyInfrastructure`
:return: a list of actions to be performed, and related descriptions
:rtype: a ``list`` of `{ 'description': ..., 'genius': ... }``
"""
if not isinstance(settings, dict):
return []
environment = PlumberyNodeContext(node=node,
container=container,
context=self.facility)
prepares = []
for key_file in self.key_files:
try:
path = os.path.expanduser(key_file)
with open(path) as stream:
key = stream.read()
stream.close()
prepares.append({
'description': 'deploy SSH public key',
'genius': SSHKeyDeployment(key=key)})
except IOError:
plogging.warning("no ssh key in {}".format(key_file))
if ('prepare' in settings
and isinstance(settings['prepare'], list)
and len(settings['prepare']) > 0):
plogging.info('- using prepare commands')
for script in settings['prepare']:
tokens = script.split(' ')
if len(tokens) == 1:
tokens.insert(0, 'run')
if tokens[0] in ['run', 'run_raw']: # send and run a script
script = tokens[1]
if len(tokens) > 2:
args = tokens[2:]
else:
args = []
plogging.debug("- {} {} {}".format(
tokens[0], script, ' '.join(args)))
try:
with open(script) as stream:
text = stream.read()
if(tokens[0] == 'run'
and PlumberyText.could_expand(text)):
plogging.debug("- expanding script '{}'"
.format(script))
text = PlumberyText.expand_string(
text, environment)
if len(text) > 0:
plogging.info("- running '{}'"
.format(script))
prepares.append({
'description': ' '.join(tokens),
'genius': ScriptDeployment(
script=text,
args=args,
name=script)})
else:
plogging.error("- script '{}' is empty"
.format(script))
except IOError:
plogging.error("- unable to read script '{}'"
.format(script))
elif tokens[0] in ['put', 'put_raw']: # send a file
file = tokens[1]
if len(tokens) > 2:
destination = tokens[2]
else:
destination = './'+file
plogging.debug("- {} {} {}".format(
tokens[0], file, destination))
try:
with open(file) as stream:
content = stream.read()
if(tokens[0] == 'put'
and PlumberyText.could_expand(content)):
plogging.debug("- expanding file '{}'"
.format(file))
content = PlumberyText.expand_string(
content, environment)
plogging.info("- putting file '{}'"
.format(file))
prepares.append({
'description': ' '.join(tokens),
'genius': FileContentDeployment(
content=content,
target=destination)})
except IOError:
plogging.error("- unable to read file '{}'"
.format(file))
else: # echo a sensible message eventually
if tokens[0] == 'echo':
tokens.pop(0)
message = ' '.join(tokens)
message = PlumberyText.expand_string(
message, environment)
plogging.info("- {}".format(message))
if ('cloud-config' in settings
and isinstance(settings['cloud-config'], dict)
and len(settings['cloud-config']) > 0):
plogging.info('- using cloud-config')
# mandatory, else cloud-init will not consider user-data
plogging.debug('- preparing meta-data')
meta_data = 'instance_id: dummy\n'
destination = '/var/lib/cloud/seed/nocloud-net/meta-data'
prepares.append({
'description': 'put meta-data',
'genius': FileContentDeployment(
content=meta_data,
target=destination)})
plogging.debug('- preparing user-data')
expanded = PlumberyText.expand_string(
settings['cloud-config'], environment)
user_data = '#cloud-config\n'+expanded
plogging.debug(user_data)
destination = '/var/lib/cloud/seed/nocloud-net/user-data'
prepares.append({
'description': 'put user-data',
'genius': FileContentDeployment(
content=user_data,
target=destination)})
plogging.debug('- preparing remote install of cloud-init')
script = 'prepare.cloud-init.sh'
try:
path = os.path.dirname(__file__)+'/'+script
with open(path) as stream:
text = stream.read()
if text:
prepares.append({
'description': 'run '+script,
'genius': ScriptDeployment(
script=text,
name=script)})
except IOError:
raise PlumberyException("Error: cannot read '{}'"
.format(script))
plogging.debug('- preparing reboot to trigger cloud-init')
prepares.append({
'description': 'reboot node',
'genius': RebootDeployment(
container=container)})
return prepares
def go(self, engine):
"""
Starts the prepare process
:param engine: access to global parameters and functions
:type engine: :class:`plumbery.PlumberyEngine`
"""
super(PreparePolisher, self).go(engine)
self.report = []
self.user = engine.get_shared_user()
self.secret = engine.get_shared_secret()
self.key_files = engine.get_shared_key_files()
if 'key' in self.settings:
key = self.settings['key']
key = os.path.expanduser(key)
if os.path.isfile(key):
plogging.debug("- using shared key {}".format(key))
if self.key_files is None:
self.key_files = [key]
else:
self.key_files.insert(0, key)
else:
plogging.error("Error: missing file {}".format(key))
def move_to(self, facility):
"""
Checks if we can beachhead at this facility
:param facility: access to local parameters and functions
:type facility: :class:`plumbery.PlumberyFacility`
This function lists all addresses of the computer that is running
plumbery. If there is at least one routable IPv6 address, then
it assumes that communication with nodes is possible. If no suitable
IPv6 address can be found, then plumbery falls back to IPv4.
Beachheading is granted only if the address of the computer running
plumbery matches the fitting parameter ``beachhead``.
"""
self.facility = facility
self.region = facility.region
self.nodes = PlumberyNodes(facility)
self.beachheading = False
try:
self.addresses = []
for interface in netifaces.interfaces():
addresses = netifaces.ifaddresses(interface)
if netifaces.AF_INET in addresses.keys():
for address in addresses[netifaces.AF_INET]:
# strip local loop
if address['addr'].startswith('127.0.0.1'):
continue
self.addresses.append(address['addr'])
if netifaces.AF_INET6 in addresses.keys():
for address in addresses[netifaces.AF_INET6]:
# strip local loop
if address['addr'].startswith('::1'):
continue
# strip local link addresses
if address['addr'].startswith('fe80::'):
continue
# we have a routable ipv6, so let's go
self.beachheading = True
except Exception as feedback:
plogging.error(str(feedback))
for item in self.facility.get_setting('prepare', []):
if not isinstance(item, dict):
continue
if 'beachhead' not in item.keys():
continue
if item['beachhead'] in self.addresses:
self.beachheading = True
break
if self.beachheading:
plogging.debug("- beachheading at '{}'".format(
self.facility.get_setting('locationId')))
else:
plogging.debug("- not beachheading at '{}'".format(
self.facility.get_setting('locationId')))
def attach_node_to_internet(self, node, ports=[]):
    """
    Adds address translation for one node

    :param node: node that has to be reachable from the internet
    :type node: :class:`libcloud.common.Node`

    :param ports: the ports that have to be opened
    :type ports: a list of ``str``

    :return: the public IPv4 address assigned to the node (or None)

    NOTE(review): the mutable default ``ports=[]`` is shared across calls;
    it is only read here, but confirm no caller mutates it.
    """
    plogging.info("Making node '{}' reachable from the internet"
                  .format(node.name))

    domain = self.container.get_network_domain(
        self.container.blueprint['domain']['name'])

    internal_ip = node.private_ips[0]

    # Reuse an existing NAT rule for this node, if any.
    external_ip = None
    for rule in self.region.ex_list_nat_rules(domain):
        if rule.internal_ip == internal_ip:
            external_ip = rule.external_ip
            plogging.info("- node is reachable at '{}'".format(external_ip))

    if self.engine.safeMode:
        plogging.info("- skipped - safe mode")
        return

    if external_ip is None:
        # Grab a free public IPv4 address and create the NAT rule,
        # retrying while the cloud resource is busy.
        external_ip = self.container._get_ipv4()
        if external_ip is None:
            plogging.info("- no more ipv4 address available -- assign more")
            return
        while True:
            try:
                self.region.ex_create_nat_rule(
                    domain,
                    internal_ip,
                    external_ip)
                plogging.info("- node is reachable at '{}'".format(
                    external_ip))
            except Exception as feedback:
                if 'RESOURCE_BUSY' in str(feedback):
                    # transient condition -- wait and retry
                    time.sleep(10)
                    continue
                elif 'RESOURCE_LOCKED' in str(feedback):
                    plogging.info("- not now - locked")
                    return
                else:
                    plogging.info("- unable to add address translation")
                    plogging.error(str(feedback))
            break

    # Create any firewall rules that are not already in place.
    candidates = self.container._list_candidate_firewall_rules(node, ports)

    for rule in self.container._list_firewall_rules():
        if rule.name in candidates.keys():
            plogging.info("Creating firewall rule '{}'"
                          .format(rule.name))
            plogging.info("- already there")
            # drop existing rules from the candidate set
            candidates = {k: candidates[k]
                          for k in candidates if k != rule.name}

    for name, rule in candidates.items():
        plogging.info("Creating firewall rule '{}'"
                      .format(name))
        if self.engine.safeMode:
            plogging.info("- skipped - safe mode")
        else:
            try:
                self.container._ex_create_firewall_rule(
                    network_domain=domain,
                    rule=rule,
                    position='LAST')
                plogging.info("- in progress")
            except Exception as feedback:
                if 'NAME_NOT_UNIQUE' in str(feedback):
                    # concurrent creation -- treat as success
                    plogging.info("- already there")
                else:
                    plogging.info("- unable to create firewall rule")
                    plogging.error(str(feedback))

    return external_ip
def shine_node(self, node, settings, container):
    """
    prepares a node

    :param node: the node to be polished
    :type node: :class:`libcloud.compute.base.Node`

    :param settings: the fittings plan for this node
    :type settings: ``dict``

    :param container: the container of this node
    :type container: :class:`plumbery.PlumberyInfrastructure`

    Side effect: appends one status entry per node to ``self.report``.
    """
    self.container = container

    plogging.info("Preparing node '{}'".format(settings['name']))
    if node is None:
        plogging.error("- not found")
        return

    # Wait (up to 300 s, polling every 6 s) while the node is still starting.
    timeout = 300
    tick = 6
    while node.extra['status'].action == 'START_SERVER':
        time.sleep(tick)
        node = self.nodes.get_node(node.name)
        timeout -= tick
        if timeout < 0:
            break

    if node.state != NodeState.RUNNING:
        plogging.error("- skipped - node is not running")
        return

    self.upgrade_vmware_tools(node)

    prepares = self._get_prepares(node, settings, container)
    if len(prepares) < 1:
        plogging.info('- nothing to do')
        self.report.append({node.name: {
            'status': 'skipped - nothing to do'
        }})
        return

    # Work out how to reach the node: existing public IP, temporary
    # ("transient") exposure, or beachheading over the private network.
    if len(node.public_ips) > 0:
        plogging.info("- node is reachable at '{}'".format(
            node.public_ips[0]))
        node.transient = False
    elif container.with_transient_exposure():
        external_ip = self.attach_node_to_internet(node, ports=['22'])
        if external_ip is None:
            plogging.error('- no IP has been assigned')
            self.report.append({node.name: {
                'status': 'unreachable'
            }})
            return
        node.public_ips = [external_ip]
        node.transient = True
    elif not self.beachheading:
        plogging.error('- node is unreachable')
        self.report.append({node.name: {
            'status': 'unreachable'
        }})
        return

    descriptions = []
    for item in prepares:
        descriptions.append(item['description'])

    if self._apply_prepares(node, prepares):
        self.report.append({node.name: {
            'status': 'completed',
            'prepares': descriptions
        }})
    else:
        self.report.append({node.name: {
            'status': 'failed',
            'prepares': descriptions
        }})

    # Tear down temporary internet exposure once preparation is done.
    if node.transient:
        self.container._detach_node_from_internet(node)
def reap(self):
    """
    Reports on preparing

    Dumps ``self.report`` as YAML to the file named by the ``output``
    setting; does nothing when no output file has been configured.
    """
    if 'output' not in self.settings:
        return
    fileName = self.settings['output']
    plogging.info("Reporting on preparations in '{}'".format(fileName))
    # The context manager closes the file; the explicit stream.close()
    # that used to follow the write was redundant and has been removed.
    with open(fileName, 'w') as stream:
        stream.write(yaml.dump(self.report, default_flow_style=False))
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
9387,
351,
198,
2,
428,
670,
329,
3224,
1321,
5115,
6634,
9238,
13,
198,
2,
383,
7054,
37,
... | 1.960159 | 13,554 |
# =============================================================================
# Copyright (c) 2016, Cisco Systems, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from os import listdir, sep, path, makedirs
from os.path import isfile, join
from diff_match_patch import diff_match_patch
from urlparse import urlparse
from constants import PlatformFamily
from itertools import count, groupby
import re
import sys
import os
import stat
import time
import datetime
import importlib
import tarfile
import urllib
import re
from constants import get_log_directory, get_temp_directory
from __builtin__ import True
def remove_extra_spaces(str):
    """
    Collapse whitespace inside each comma-delimited field and strip the
    fields, e.g. 'x  x , y, z' becomes 'x x,y,z'.  None passes through.
    """
    if str is None:
        return str
    cleaned_fields = (re.sub(r'\s+', ' ', field).strip()
                      for field in str.split(','))
    return ','.join(cleaned_fields)
def get_datetime(date_string, format=None):
    """
    Converts a datetime string to internal python datetime.
    Returns None if the string is not a valid date time.

    When no format is given, a couple of common layouts are probed.
    """
    try:
        if not format:
            # Probe known layouts, falling back to a best-effort format.
            if re.search('\d+-\d+-\d+ \d+:\d+:\d+', date_string):
                # e.g. 2016-12-12 13:07:32
                format = '%Y-%m-%d %H:%M:%S'
            elif re.search('\d+/\d+/\d+ \d+:\d+ [A|P]M', date_string):
                # e.g. 01/17/2017 11:10 PM
                format = "%m/%d/%Y %I:%M %p"
            else:
                # FIXME: Best effort for now.
                format = "%m-%d-%Y %I:%M %p"
        return datetime.datetime.strptime(date_string, format)
    except:
        # any parse failure means "not a valid date time"
        return None
def multiple_replace(string, rep_dict):
    """
    Apply every key -> value substitution of rep_dict to string in a
    single pass over the input.
    """
    alternation = "|".join(re.escape(key) for key in rep_dict.keys())
    matcher = re.compile(alternation, re.M)

    def _lookup(match):
        return rep_dict[match.group(0)]

    return matcher.sub(_lookup, string)
def datetime_from_local_to_utc(local_datetime):
    """
    :param local_datetime: Python datetime object, interpreted as local time
    :return: the equivalent UTC time as a 'YYYY-MM-DD HH:MM:SS' string
    """
    epoch_seconds = time.mktime(local_datetime.timetuple())
    utc_struct = time.gmtime(epoch_seconds)
    return time.strftime("%Y-%m-%d %H:%M:%S", utc_struct)
def untar(tar_file_path, output_directory, remove_tar_file=None):
    """
    Extract the tar file to a given output file directory and return the
    content of the tar file as an array of member names.

    :param tar_file_path: path of the archive to extract
    :param output_directory: directory that receives the extracted files
    :param remove_tar_file: when truthy, delete the archive afterwards
    :return: list of member names found in the archive
    """
    file_list = []
    tar = tarfile.open(tar_file_path)
    for tar_info in tar.getmembers():
        file_list.append(tar_info.name)
    tar.extractall(output_directory)
    tar.close()

    # Make each extracted entry writable by its owner.  The original code
    # called make_file_writable(), which is not defined anywhere in this
    # module and raised NameError; os.chmod with stat.S_IWUSR does the job
    # directly (the 'stat' import was already present for this purpose).
    for filename in file_list:
        extracted = output_directory + os.path.sep + filename
        if os.path.exists(extracted):
            os.chmod(extracted, os.stat(extracted).st_mode | stat.S_IWUSR)

    # Remove the tar file if indicated.
    if remove_tar_file:
        os.unlink(tar_file_path)
    return file_list
def get_file_list(directory, filter=None):
    """
    Given a directory path, returns the sorted list of plain files in that
    directory.  A substring filter may also be given, e.g. filter = '.pie'.
    Any listing error yields an empty result.
    """
    matches = []
    try:
        for entry in listdir(directory):
            if not isfile(join(directory, entry)):
                continue
            if filter is None or entry.find(filter) != -1:
                matches.append(entry)
    except:
        pass
    return sorted(matches)
def make_url(connection_type, host_username, host_password, host_or_ip, port_number, enable_password=None):
    """
    Creates a connection URL such as
        telnet://user:pass@1.1.1.1          (without port)
        telnet://user:pass@1.1.1.1:2048     (with port)
        telnet://:pass@1.1.1.1:2048         (empty user)
        telnet://user:@1.1.1.1:2048         (empty password)
        telnet://user@1.1.1.1:2048          (no password)
        telnet://:@1.1.1.1:2048             (empty user and password)
        telnet://1.1.1.1:2048               (no user and password)
        telnet://user:pass@1.1.1.1:2048/enable password (with enable password)
    """
    url = '{}://'.format(connection_type)

    have_username = not is_empty(host_username)
    have_password = not is_empty(host_password)

    if have_username:
        url += '{}'.format(urllib.quote(host_username, safe=""))
    if have_password:
        url += ':{}'.format(urllib.quote(host_password, safe=""))

    # The '@' separator only appears when some credential was written.
    if have_username or have_password:
        url += '@{}'.format(host_or_ip)
    else:
        url += '{}'.format(host_or_ip)

    if not is_empty(port_number):
        url += ':{}'.format(port_number)

    if not is_empty(enable_password):
        url += '?enable_password={}'.format(urllib.quote(enable_password, safe=""))

    return url
def concatenate_dirs(dir1, dir2):
    """
    Appends dir2 to dir1.  Either or both of dir1/dir2 may be None or
    empty; missing parts are simply skipped.
    """
    head = dir1 if dir1 else ''
    if not dir2:
        return head
    if not head:
        return dir2
    return os.path.join(head, dir2)
def get_base_url(url):
    """
    Returns the base URL, keeping an explicit port number when present,
    e.g. 'http://localhost:5000'.
    """
    parts = urlparse(url)
    base = "{}://{}".format(parts.scheme, parts.hostname)
    if parts.port is None:
        return base
    return base + ":{}".format(parts.port)
def is_empty(obj):
    """
    Report whether obj is "empty".  Counts as empty:
    [], None, '', ' ', 'None' (and falsy values like 0 in general).
    """
    if isinstance(obj, str):
        # 'None' and pure whitespace both normalize to the empty string
        obj = obj.replace('None', '').strip()
    return not obj
def get_acceptable_string(input_string):
    """
    Strips all unwanted characters except a-z, A-Z, 0-9, and '(). -_',
    then collapses runs of whitespace into single spaces and strips the
    ends.  Returns None when input_string is None.
    """
    if input_string is None:
        return None
    # Keep only letters, digits, parentheses, dash, underscore, dot and
    # whitespace.  The dash is escaped: the previous pattern contained
    # ")-_", a character *range* covering ASCII 41-95, which wrongly let
    # through characters such as ',', ';', '@' and '^'.
    temp = re.sub(r"[^a-z0-9()\-_.\s]", '', input_string, flags=re.I)
    return re.sub(r"\s+", " ", temp).strip()
def check_acceptable_string(input_string):
    """ Will throw exception if the result string is blank or None. """
    original = input_string
    cleaned = get_acceptable_string(input_string)
    if not cleaned:
        raise ValueError('"' + original +
                         '" contains invalid characters. It should only contain a-z, A-Z, 0-9, (). -_')
    return cleaned
def generate_file_diff(filename1, filename2):
    """
    Given two files, return the file diff in HTML format.
    A file that cannot be read is treated as empty.
    """
    def _read_or_empty(path):
        # missing/unreadable files contribute empty text to the diff
        try:
            with open(path) as handle:
                return handle.read()
        except IOError:
            return ''

    dmp = diff_match_patch()
    diffs = dmp.diff_main(_read_or_empty(filename1), _read_or_empty(filename2))
    dmp.diff_cleanupSemantic(diffs)
    return dmp.diff_prettyHtml(diffs)
def generate_ip_range(start_ip, end_ip):
    """
    Return every IPv4 address from start_ip to end_ip, inclusive.
    Example: generate_ip_range("192.168.1.0", "192.168.2.0")
    """
    current = [int(octet) for octet in start_ip.split(".")]
    last = [int(octet) for octet in end_ip.split(".")]

    addresses = [start_ip]
    while current != last:
        current[3] += 1
        # Propagate carries from the least significant octet upwards.
        for position in (3, 2, 1):
            if current[position] == 256:
                current[position] = 0
                current[position - 1] += 1
        addresses.append(".".join(str(octet) for octet in current))
    return addresses
def get_return_url(request, default_url=None):
    """
    Returns the 'return_url' encoded in the request parameters, falling
    back to default_url when it is absent.
    """
    return_url = request.args.get('return_url')
    return default_url if return_url is None else return_url
def get_search_results(pattern_list, string_list):
    """
    Given a pattern_list and string_list, return a list of dictionaries
    indicating whether each string matches at least one pattern:
    {'string': xxxxx, 'matched': True}
    """
    results = []
    for candidate in string_list:
        # any() stops at the first matching pattern
        hit = any(re.search(pattern, candidate) is not None
                  for pattern in pattern_list)
        results.append({'string': candidate, 'matched': hit})
    return results
def convert_integer_list_to_ranges(a_list):
    """
    integer_list: [0, 1, 2, 3] returns "0-3"
                  [0, 1, 2, 4, 8] returns "0-2,4,8"
    Elements of a_list must be integers (or integer-like strings such as
    '100'); anything else is ignored.
    """
    def _is_integer(value):
        # The previous code called is_integer(), which is not defined or
        # imported anywhere in this module and raised NameError.
        try:
            int(str(value))
            return True
        except ValueError:
            return False

    integer_list = [int(s) for s in a_list if _is_integer(s)]
    # Consecutive values share the same (counter - value) key, so groupby
    # splits the sorted input into runs.
    G = (list(x) for _, x in groupby(integer_list, lambda x, c=count(): next(c) - x))
    return ",".join("-".join(map(str, (g[0], g[-1])[:len(g)])) for g in G)
if __name__ == '__main__':
    # Ad-hoc manual checks when this module is executed directly.
    print(get_acceptable_string('john SMITH~!@#$%^&*()_+().smith'))
    L = [1, 5, 6, 9, 10, 11, 13, 15, 16, 17, 100, 102, 200, 201, 203, 205]
    # Was a Python 2 `print` statement; converted to the function-call form
    # already used two lines above, which parses under Python 2 and 3 alike.
    print(convert_integer_list_to_ranges(L))
    # L = [u'100', u'101', u'103', u'104', u'106', u'107', 'ALE', 119, 120, 122]
    # print(convert_integer_list_to_ranges(L))
| [
2,
38093,
25609,
198,
2,
15069,
357,
66,
8,
1584,
11,
28289,
11998,
11,
3457,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
389,
104... | 2.422892 | 4,377 |
#
#
# Helper functions used by the modules.
#
#
import pygame
def tsub(tup1, tup2):
    """Return the element-wise difference tup1 - tup2.

    The old docstring stated the subtraction backwards ("subtracts tup1
    elements from tup2"); the code computes tup1[i] - tup2[i].  Also
    generalized from fixed 2-tuples to tuples of any equal length; for
    2-tuples the result is unchanged.
    """
    return tuple(a - b for a, b in zip(tup1, tup2))
def tadd(tup1, tup2):
    """Return the element-wise sum of tup1 and tup2.

    Generalized from fixed 2-tuples to tuples of any equal length; for
    2-tuples the result is unchanged.
    """
    return tuple(a + b for a, b in zip(tup1, tup2))
def tflip(tup):
    """
    Swap the two elements of a pair.

    Useful for list-to-screen coordinate translation:
    in a list of lists x = rows = vertical, whereas on
    screen x = horizontal.
    """
    row, col = tup[0], tup[1]
    return (col, row)
2,
198,
2,
198,
2,
220,
220,
220,
5053,
525,
5499,
973,
416,
262,
13103,
13,
198,
2,
198,
2,
198,
11748,
12972,
6057,
198,
198,
4299,
256,
7266,
7,
83,
929,
16,
11,
256,
929,
17,
2599,
198,
220,
220,
220,
37227,
3834,
83,
974,... | 2.279661 | 236 |
from hypermodel.utilities.hm_shell import sh
from hypermodel.kubeflow.deploy_dev import deploy_to_dev
import logging
import sys
| [
6738,
8718,
19849,
13,
315,
2410,
13,
23940,
62,
29149,
1330,
427,
198,
6738,
8718,
19849,
13,
74,
549,
891,
9319,
13,
2934,
1420,
62,
7959,
1330,
6061,
62,
1462,
62,
7959,
198,
198,
11748,
18931,
198,
11748,
25064,
198
] | 3.225 | 40 |
from sqlalchemy import Column, ForeignKey, Index, Integer
from sqlalchemy.orm import backref, relationship
from grouper.models.base.model_base import Model
class GroupServiceAccount(Model):
    """Service accounts owned by a group.

    A group may own zero or more service accounts. This table holds the mapping between a Group and
    the ServiceAccount objects it owns.
    """

    __tablename__ = "group_service_accounts"
    __table_args__ = (
        # Each (group, service account) pair is recorded at most once.
        Index("group_service_account_idx", "group_id", "service_account_id", unique=True),
    )

    # Surrogate primary key for the mapping row.
    id = Column(Integer, primary_key=True)

    # Owning group; the backref exposes Group.service_accounts.
    group_id = Column(Integer, ForeignKey("groups.id"), nullable=False)
    group = relationship("Group", backref="service_accounts", foreign_keys=[group_id])

    # Owned service account; the "owner" backref is scalar (uselist=False),
    # i.e. a ServiceAccount sees at most one owning row.
    service_account_id = Column(Integer, ForeignKey("service_accounts.id"), nullable=False)
    service_account = relationship(
        "ServiceAccount",
        backref=backref("owner", uselist=False),
        foreign_keys=[service_account_id],
    )
| [
6738,
44161,
282,
26599,
1330,
29201,
11,
8708,
9218,
11,
12901,
11,
34142,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
736,
5420,
11,
2776,
198,
198,
6738,
1132,
525,
13,
27530,
13,
8692,
13,
19849,
62,
8692,
1330,
9104,
628,
198,
... | 3.033133 | 332 |
#!/usr/bin/env python3
"""
Author : reidloeffler <reidloeffler@localhost>
Date : 2021-11-23
Purpose: Assignment 12
"""
import argparse
import sys
# --------------------------------------------------
def get_args():
    """Parse and return the command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Python grep',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Positional arguments
    parser.add_argument('search_term', metavar='PATTERN', type=str,
                        help='Search pattern')
    parser.add_argument('input_files', metavar='FILE', nargs='+',
                        type=argparse.FileType('rt'),
                        help='Input file(s)')

    # Options
    parser.add_argument('-i', '--insensitive', action='store_true',
                        default=False,
                        help='Case-insensitive search')
    parser.add_argument('-o', '--outfile', metavar='FILE',
                        type=argparse.FileType('wt'), default=sys.stdout,
                        help='Output')

    return parser.parse_args()
# --------------------------------------------------
def main():
    """Run the grep over every input file, writing hits to the output."""
    args = get_args()
    search_term = args.search_term
    input_files = args.input_files
    insensitive = args.insensitive
    outfile = args.outfile

    # Preserved from the original: when the pattern contains '?', the last
    # two characters are dropped (emulating a trailing 'X?' regex by
    # searching for the prefix).  NOTE(review): this triggers for a '?'
    # anywhere in the pattern, not just at the end -- confirm intent.
    if '?' in search_term:
        search_term = search_term[:len(search_term) - 2]

    # The two former branches duplicated the printing logic and compared a
    # bool with `is False`; compute the match once and print once instead.
    prefix_names = len(input_files) > 1
    for file in input_files:
        for line in file:
            if insensitive:
                matched = search_term.lower() in line.lower()
            else:
                matched = search_term in line
            if matched:
                if prefix_names:
                    print(file.name + ':', end='', file=outfile)
                print(line, end='', file=outfile)
# --------------------------------------------------
if __name__ == '__main__':
    main()  # entry point when executed as a script
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
13838,
1058,
302,
312,
24617,
487,
1754,
1279,
260,
312,
24617,
487,
1754,
31,
36750,
29,
198,
10430,
220,
220,
1058,
33448,
12,
1157,
12,
1954,
198,
30026,
3455,
25,
... | 2.047354 | 1,077 |
from django.urls import path, include
from .api import urls
# URL namespace for this app, used when reversing (e.g. 'imc:...').
app_name = 'imc'

urlpatterns = [
    # Mount the IMC growth-curve API patterns under this prefix.
    path(
        'api/growth-curve/imc/',
        include(urls.imc_curves_patterns_api)
    ),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
6738,
764,
15042,
1330,
2956,
7278,
198,
198,
1324,
62,
3672,
796,
705,
320,
66,
6,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
7,
198,
220,
220,
220,... | 2.097826 | 92 |
import ast
from matrix_tools import *
# Load the determinant-one transformation matrices: the file stores them as
# flat rows, three rows per matrix, so reshape into an array of 3x3 matrices.
# NOTE(review): `len(transformations) / 3` is float division under Python 3;
# this script appears to target Python 2 -- confirm before porting.
transformations = numpy.loadtxt("Data/AllDetOfOne.txt")
transformations = numpy.reshape(transformations, (len(transformations) / 3, 3, 3))

# Count lines first: the basis file alternates id lines and basis lines,
# so it holds num_of_lines / 2 (id, basis) records.
with open("Data/NiggliBasis.txt") as f:
    lines = f.readlines()
num_of_lines = len(lines)

# Re-open and walk the records pairwise: one id line, then one basis line.
n_basis_file = open("Data/NiggliBasis.txt", "r")
for i in range(0, num_of_lines / 2):
    niggli_id = n_basis_file.readline()
    basis = n_basis_file.readline()
    niggli_id = niggli_id.rstrip()
    # The basis line is a Python literal; parse it safely with ast.
    basis = ast.literal_eval(basis)
    transformed = []
    for transform in transformations:
        transformed.append(calculate_transform(transform, basis))
    save_matrix(transformed, "Data/NiggliTransforms/" + niggli_id + "_Transformed.txt")
n_basis_file.close()
| [
11748,
6468,
198,
6738,
17593,
62,
31391,
1330,
1635,
198,
198,
35636,
602,
796,
299,
32152,
13,
2220,
14116,
7203,
6601,
14,
3237,
11242,
5189,
3198,
13,
14116,
4943,
198,
35636,
602,
796,
299,
32152,
13,
3447,
1758,
7,
35636,
602,
1... | 2.557093 | 289 |
from djangocities.user.query import *
from djangocities.cities.query import *
from djangocities.sites.query import *
from djangocities.pages.query import *
| [
6738,
42625,
648,
420,
871,
13,
7220,
13,
22766,
1330,
1635,
198,
6738,
42625,
648,
420,
871,
13,
66,
871,
13,
22766,
1330,
1635,
198,
6738,
42625,
648,
420,
871,
13,
49315,
13,
22766,
1330,
1635,
198,
6738,
42625,
648,
420,
871,
13... | 3.183673 | 49 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import dlib
import cv2
import pickle
from sklearn.svm import SVC
from cam.models import dlib_model, classifier_model
# dlib's built-in frontal face detector.
FaceDetection = dlib.get_frontal_face_detector()

# face recognition
# Landmark predictor and face-embedding network, loaded from the model
# paths declared in cam.models.
sp = dlib.shape_predictor(dlib_model.pose_predictor_model_location())
Description = dlib.face_recognition_model_v1(dlib_model.face_recognition_model_location())

if __name__ == '__main__':
    # NOTE(review): run() is neither defined in this file nor imported above;
    # executing this module as a script raises NameError. Confirm where
    # run() is supposed to come from.
    run()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
288,
8019,
198,
11748,
269,
85,
17,
198,
11748,
2298,
293,
198,
6738,
1341,
35720,
13,
82,
14761,
1330,... | 2.710843 | 166 |
# -*-- coding: utf-8 -*
def discretize(self, Npoint=-1):
    """Return the discretized version of the SurfRing.

    Parameters
    ----------
    self : SurfRing
        A SurfRing object
    Npoint : int
        Number of points on each line (default -1 => use the line
        default discretization)

    Returns
    -------
    point_list : list
        List of complex coordinates
    """
    # Validate the surface before discretizing it
    self.check()

    # Discretize the outer contour first, then append the inner one
    points = self.out_surf.discretize(Npoint=Npoint)
    points += self.in_surf.discretize(Npoint=Npoint)
    return points
| [
2,
532,
9,
438,
19617,
25,
3384,
69,
12,
23,
532,
9,
198,
4299,
1221,
1186,
1096,
7,
944,
11,
399,
4122,
10779,
16,
2599,
198,
220,
220,
220,
37227,
35561,
262,
1221,
1186,
1096,
2196,
286,
262,
43771,
39687,
628,
220,
220,
220,
... | 2.661088 | 239 |
mapping = {foo: bar for<caret> | [
76,
5912,
796,
1391,
21943,
25,
2318,
329,
27,
6651,
83,
29
] | 2.5 | 12 |
from ..utils.generic_utils import deserialize_object
from .rollout_mcts import *
from .evaluation_mcts import *
from .rl_evaluation_mcts import *
| [
6738,
11485,
26791,
13,
41357,
62,
26791,
1330,
748,
48499,
1096,
62,
15252,
198,
6738,
764,
2487,
448,
62,
76,
310,
82,
1330,
1635,
198,
6738,
764,
18206,
2288,
62,
76,
310,
82,
1330,
1635,
198,
6738,
764,
45895,
62,
18206,
2288,
6... | 2.979592 | 49 |
# This is free software licensed under the MIT license.
# Copyright (c) 2021 Samarth Ramesh <samarthr1@outlook.com>
# You should have received a copy of the MIT license with this file. In case you have not, visit https://github.com/samarth-ramsh/mtlaucher
| [
2,
770,
318,
1479,
3788,
11971,
739,
262,
17168,
5964,
13,
198,
2,
15069,
357,
66,
8,
33448,
3409,
11999,
371,
1047,
71,
1279,
37687,
11999,
81,
16,
31,
448,
5460,
13,
785,
29,
198,
2,
921,
815,
423,
664,
1572,
257,
4866,
286,
2... | 3.355263 | 76 |
# Copyright notice:
# Copyright Members of the EMI Collaboration, 2013.
#
# See www.eu-emi.eu for details on the copyright holders
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import simplejson as json
except:
import json
import logging
from fts3.model import *
from fts3rest.lib.api import doc
from fts3rest.lib.base import BaseController, Session
from fts3rest.lib.helpers import accept
from fts3rest.lib.middleware.fts3auth import authorize
from fts3rest.lib.middleware.fts3auth.constants import *
__controller__ = 'ConfigAuditController'
log = logging.getLogger(__name__)
class ConfigAuditController(BaseController):
    """
    Config audit
    """

    @doc.return_type(array_of=ConfigAudit)
    @authorize(CONFIG)
    @accept(html_template='/config/audit.html')
    def audit(self):
        """
        Returns the last 100 entries of the config audit tables
        """
        # Most recent entries first, capped at 100 rows.
        return Session.query(ConfigAudit).order_by(ConfigAudit.datetime.desc()).limit(100).all()
| [
2,
220,
220,
15069,
4003,
25,
198,
2,
220,
220,
15069,
220,
12688,
286,
262,
412,
8895,
37322,
341,
11,
2211,
13,
198,
2,
198,
2,
220,
220,
4091,
7324,
13,
12496,
12,
43967,
13,
12496,
329,
3307,
319,
262,
6634,
16392,
198,
2,
1... | 3.050201 | 498 |
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
9922,
62,
26675,
11,
277,
16,
62,
26675,
628,
628
] | 3.26087 | 23 |
"""empty message
Revision ID: 140_service_id_null_for_drafts
Revises: 130_acknowledged_at_column
Create Date: 2015-06-22 10:42:37.274484
"""
# revision identifiers, used by Alembic.
revision = '140_service_id_null_for_drafts'
down_revision = '130_acknowledged_at_column'
from alembic import op
import sqlalchemy as sa
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
12713,
62,
15271,
62,
312,
62,
8423,
62,
1640,
62,
35679,
82,
198,
18009,
2696,
25,
11323,
62,
441,
2197,
37436,
62,
265,
62,
28665,
198,
16447,
7536,
25,
1853,
12,
3312,
12,
1828... | 2.722689 | 119 |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
# Package metadata for the django-lazy-services distribution.
setuptools.setup(
    name="django-lazy-services",
    version="0.0.3",
    author="Gordon Wrigley",
    author_email="gordon.wrigley@gmail.com",
    description="A helper for switching between test and production versions of a service module",
    # README.md (read above) doubles as the PyPI long description.
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/tolomea/django-lazy-services",
    # Ship every package found in the tree except the test suite.
    packages=setuptools.find_packages(exclude=["tests"]),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
... | 2.69708 | 274 |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
str_to_int,
)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
302,
198,
198,
6738,
764,
11321,
1330,
14151,
11627,
40450,
198,
6738,
11485,
26791,
1330,
357,
198,
220,
220,
220,
21136,
62,
32257,
11,
198,
220,
220,
220,
... | 2.94 | 50 |
import math
import random
# Smallest candidate divisor for a primality check -- presumably the start
# of the trial-division loop; confirm against the module that uses it.
FIRST_DIVISOR = 2
# Presumably the inclusive bounds for the randomly chosen number -- verify
# against the caller.
LOWER_LIMIT = 1
UPPER_LIMIT = 100

# Instruction text shown to the player.
RULE = 'Answer "yes" if given number is prime. Otherwise answer "no".'
| [
11748,
10688,
198,
11748,
4738,
198,
198,
39776,
2257,
62,
33569,
1797,
1581,
796,
362,
198,
43,
36048,
62,
43,
3955,
2043,
796,
352,
198,
8577,
18973,
62,
43,
3955,
2043,
796,
1802,
198,
198,
49,
24212,
796,
705,
33706,
366,
8505,
... | 2.684211 | 57 |
# -*- coding: utf-8 -*-
from flask import Blueprint, request,json
from pusher import pusher
blueprint = Blueprint('guest', __name__)
@blueprint.route('/new/guest', methods=['POST']) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42903,
1330,
39932,
11,
2581,
11,
17752,
198,
6738,
4192,
372,
1330,
4192,
372,
198,
198,
17585,
4798,
796,
39932,
10786,
5162,
395,
3256,
11593,
3672,
834,
8,
198... | 2.904762 | 63 |
from flask import jsonify
from flask_testing import TestCase
from src.models import db
from src.app import create_app
from src.models.task import Task
| [
6738,
42903,
1330,
33918,
1958,
198,
6738,
42903,
62,
33407,
1330,
6208,
20448,
198,
198,
6738,
12351,
13,
27530,
1330,
20613,
198,
6738,
12351,
13,
1324,
1330,
2251,
62,
1324,
198,
6738,
12351,
13,
27530,
13,
35943,
1330,
15941,
198
] | 3.8 | 40 |
import torch
from snn_layers import SpikeTensor
def validate_snn(net, test_loader, device, criterion, timesteps):
    """Perform validation for SNN"""
    print("Performing validation for SNN")
    # switch to evaluate mode
    net.eval()
    correct = 0
    with torch.no_grad():
        for data_test, target in test_loader:
            data, target = data_test.to(device), target.to(device)
            # Replicate the batch across the time axis: broadcasting against
            # zeros of shape (timesteps,) + data.shape yields a tensor with
            # a leading timestep dimension.
            data, _ = torch.broadcast_tensors(data, torch.zeros((timesteps,) + data.shape))
            # Move the timestep axis last (permute sends axis 0 to the end)
            # and wrap in a SpikeTensor for the spiking layers.
            data = SpikeTensor(data.permute(1, 2, 3, 4, 0).to(device), timesteps)
            # firing_ratio() converts spike trains back to class scores.
            output = net(data).firing_ratio()
            # NOTE(review): loss is computed but never used or reported here.
            loss = criterion(output, target)
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    print('SNN Prec@1: {}/{} ({:.2f}%)\n'.format(
        correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
def validate_ann(net, test_loader, device, criterion):
    """Perform validation for ANN.

    Runs the network over the test set without gradients and prints the
    top-1 accuracy.

    :param net: model to evaluate
    :param test_loader: iterable of (input, target) batches; must expose a
        ``dataset`` attribute for the final count
    :param device: device the evaluation runs on
    :param criterion: kept for interface compatibility; the previous code
        computed a per-batch loss with it that was never used or reported,
        so that dead computation has been removed
    """
    print("Performing validation for ANN")
    # switch to evaluate mode
    net.eval()
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data = data.to(device)
            target = target.to(device)
            output = net(data)
            # prediction is the class with the highest score
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    print('ANN Prec@1: {}/{} ({:.2f}%)\n'.format(
        correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
| [
11748,
28034,
198,
6738,
3013,
77,
62,
75,
6962,
1330,
26309,
51,
22854,
628,
198,
4299,
26571,
62,
16184,
77,
7,
3262,
11,
1332,
62,
29356,
11,
3335,
11,
34054,
11,
4628,
395,
25386,
2599,
198,
220,
220,
220,
37227,
5990,
687,
2120... | 2.306834 | 717 |
"""Script to train pixelCNN on the CIFAR10 dataset."""
import random as rn
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
class MaskedConv2D(keras.layers.Layer):
    """Convolutional layers with masks.

    Convolutional layers with simple implementation of masks type A and B for
    autoregressive models.

    Arguments:
    mask_type: one of `"A"` or `"B".`
    filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the convolution).
    kernel_size: An integer or tuple/list of 2 integers, specifying the
        height and width of the 2D convolution window.
        Can be a single integer to specify the same value for
        all spatial dimensions.
    strides: An integer or tuple/list of 2 integers,
        specifying the strides of the convolution along the height and width.
        Can be a single integer to specify the same value for
        all spatial dimensions.
        Specifying any stride value != 1 is incompatible with specifying
        any `dilation_rate` value != 1.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    """
    # NOTE(review): only the docstring is present in this excerpt -- no
    # __init__/build/call implementing the masked convolution. Confirm the
    # full implementation exists elsewhere; as written this layer does
    # nothing and the model construction below cannot work.
class ResidualBlock(keras.Model):
    """Residual blocks that compose pixelCNN

    Blocks of layers with 3 convolutional layers and one residual connection.
    Based on Figure 5 from [1] where h indicates number of filters.

    Refs:
    [1] - Oord, A. V. D., Kalchbrenner, N., & Kavukcuoglu, K. (2016). Pixel recurrent
    neural networks. arXiv preprint arXiv:1601.06759.
    """
    # NOTE(review): only the docstring is present in this excerpt -- the
    # layer definitions and call() are missing. Confirm the full
    # implementation exists elsewhere.
def quantise(images, q_levels):
    """Quantise image into q_levels discrete levels.

    Values (expected in [0, 1]) are bucketed against q_levels uniform bin
    edges and mapped to integer levels 0 .. q_levels-1, returned as float32.
    """
    return (np.digitize(images, np.arange(q_levels) / q_levels) - 1).astype('float32')


# The training script below calls `quantisize`, which was a NameError since
# only `quantise` is defined; provide the spelling the script expects.
quantisize = quantise
# Loading data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

height = 32
width = 32
n_channel = 3

# Scale pixel values to [0, 1] and make the NHWC layout explicit.
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.

x_train = x_train.reshape(x_train.shape[0], height, width, n_channel)
x_test = x_test.reshape(x_test.shape[0], height, width, n_channel)

# --------------------------------------------------------------------------------------------------------------
# Quantisize the input data in q levels
q_levels = 8
# NOTE(review): x_train_overfit / x_test_overfit are never defined in this
# file, and the variables produced here (*_of) are never read -- the code
# below uses x_train_quantised / x_test_quantised, which are also undefined.
# This script cannot run as-is; confirm against the original source.
x_train_quantised_of = quantisize(x_train_overfit, q_levels)
x_test_quantised_of = quantisize(x_test_overfit, q_levels)

# ------------------------------------------------------------------------------------
# Creating input stream using tf.data API
batch_size = 128
train_buf = 60000

# Inputs are the normalized levels in [0, 1]; targets are the integer levels.
train_dataset = tf.data.Dataset.from_tensor_slices(
    (x_train_quantised / (q_levels - 1),
     x_train_quantised.astype('int32')))
train_dataset = train_dataset.shuffle(buffer_size=train_buf)
train_dataset = train_dataset.batch(batch_size)

test_dataset = tf.data.Dataset.from_tensor_slices((x_test_quantised / (q_levels - 1),
                                                   x_test_quantised.astype('int32')))
test_dataset = test_dataset.batch(batch_size)

# --------------------------------------------------------------------------------------------------------------
# Create PixelCNN model
n_filters = 120
inputs = keras.layers.Input(shape=(height, width, n_channel))
# Type-A mask for the first layer (the current pixel must not see itself).
x = MaskedConv2D(mask_type='A', filters=n_filters, kernel_size=7)(inputs)
for i in range(15):
    x = keras.layers.Activation(activation='relu')(x)
    x = ResidualBlock(h=n_filters)(x)

x = keras.layers.Activation(activation='relu')(x)
x = MaskedConv2D(mask_type='B', filters=n_filters, kernel_size=1)(x)
x = keras.layers.Activation(activation='relu')(x)
# One logit per channel per quantisation level.
x = MaskedConv2D(mask_type='B', filters=n_channel * q_levels, kernel_size=1)(x)  # shape [N,H,W,DC]

pixelcnn = tf.keras.Model(inputs=inputs, outputs=x)

# --------------------------------------------------------------------------------------------------------------
# Prepare optimizer and loss function
lr_decay = 0.9999
learning_rate = 5e-3 #5
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)

compute_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)

# --------------------------------------------------------------------------------------------------------------
# NOTE(review): orphan decorator -- the decorated train_step function is
# missing from this excerpt (it is called in the training loop below), and
# a bare decorator before an assignment is a SyntaxError. Restore the
# missing function from the original source.
@tf.function

# ------------------------------------------------------------------------------------
# Training loop
n_epochs = 20
n_iter = int(np.ceil(x_train_quantised.shape[0] / batch_size))
for epoch in range(n_epochs):
    # NOTE(review): Progbar is not imported in this excerpt
    # (tf.keras.utils.Progbar in the original, presumably -- confirm).
    progbar = Progbar(n_iter)
    print('Epoch {:}/{:}'.format(epoch + 1, n_epochs))
    for i_iter, (batch_x, batch_y) in enumerate(train_dataset):
        # exponential learning-rate decay per step
        optimizer.lr = optimizer.lr * lr_decay
        loss = train_step(batch_x, batch_y)
        progbar.add(1, values=[('loss', loss)])

# ------------------------------------------------------------------------------------
# Test set performance
test_loss = []
for batch_x, batch_y in test_dataset:
    logits = pixelcnn(batch_x, training=False)
    # Calculate cross-entropy (= negative log-likelihood)
    loss = compute_loss(tf.squeeze(tf.one_hot(batch_y, q_levels)), logits)
    test_loss.append(loss)
print('nll : {:} nats'.format(np.array(test_loss).mean()))
print('bits/dim : {:}'.format(np.array(test_loss).mean() / np.log(2)))

# ------------------------------------------------------------------------------------
# Generating new images: sample pixels autoregressively, one at a time.
samples = np.zeros((100, height, width, n_channel), dtype='float32')
for i in range(height):
    for j in range(width):
        logits = pixelcnn(samples)
        next_sample = tf.random.categorical(logits[:, i, j, :], 1)
        # write back the normalized sampled level for channel 0
        samples[:, i, j, 0] = (next_sample.numpy() / (q_levels - 1))[:, 0]

fig = plt.figure(figsize=(10, 10))
for i in range(100):
    ax = fig.add_subplot(10, 10, i + 1)
    ax.matshow(samples[i, :, :, 0], cmap=matplotlib.cm.binary)
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
plt.show()

# ------------------------------------------------------------------------------------
# Filling occluded images: zero out the bottom part of some test images.
occlude_start_row = 16
num_generated_images = 10
samples = np.copy(x_test_quantised[0:num_generated_images, :, :, :])
samples = samples / (q_levels - 1)
samples[:, occlude_start_row:, :, :] = 0
fig = plt.figure(figsize=(10, 10))
for i in range(10):
ax = fig.add_subplot(1, 10, i + 1)
ax.matshow(samples[i, :, :, 0], cmap=matplotlib.cm.binary)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
for i in range(occlude_start_row, height):
for j in range(width):
for k in range(n_channel):
logits = pixelcnn(samples)
logits = tf.reshape(logits, [-1, height, width, q_levels, n_channel])
logits = tf.transpose(logits, perm=[0, 1, 2, 4, 3])
next_sample = tf.random.categorical(logits[:, i, j, k, :], 1)
samples[:, i, j, k] = (next_sample.numpy() / (q_levels - 1))[:, 0]
fig = plt.figure(figsize=(10, 10))
for i in range(10):
ax = fig.add_subplot(1, 10, i + 1)
ax.matshow(samples[i, :, :, 0], cmap=matplotlib.cm.binary)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.show()
if __name__ == '__main__':
main()
| [
198,
37811,
7391,
284,
4512,
17465,
18474,
319,
262,
327,
5064,
1503,
940,
27039,
526,
15931,
198,
11748,
4738,
355,
374,
77,
198,
11748,
640,
198,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,... | 2.563991 | 2,977 |
from flask import Flask, request, Response
import json
import pymongo
from flask_cors import CORS
from bson.json_util import dumps, loads
import os
from azure.storage.blob import BlockBlobService, PublicAccess
from celery import Celery
import subprocess
import uuid
app = Flask(__name__)
CORS(app)
db_client = pymongo.MongoClient(os.environ['SPASS_CONNECTION_STRING']).spassDatabase
seismic_blob = BlockBlobService(account_name='seismicdata', account_key=os.environ['SPASS_DATA_BLOB_KEY'])
seismic_blob.create_container('seismic-data')
seismic_blob.set_container_acl('seismic-data', public_access=PublicAccess.Container)
seismic_blob.create_container('seismic-tools')
seismic_blob.set_container_acl('seismic-tools', public_access=PublicAccess.Container)
seismic_blob.create_container('seismic-results')
seismic_blob.set_container_acl('seismic-results', public_access=PublicAccess.Container)
celery = Celery(app.name, broker=os.environ['SPASS_CELERY_BROKER'], backend=os.environ['SPASS_CELERY_BROKER'])
@celery.task
@app.route("/healthz")
@app.route("/api/users/create/", methods=['POST'])
@app.route("/api/users/delete/", methods=['DELETE'])
@app.route("/api/users/", methods=['GET'])
@app.route("/api/users/authenticate/", methods=['POST'])
@app.route("/api/tasks/parameters/<tool_name>/", methods=['GET'])
@app.route("/api/tasks/submit/", methods=['POST'])
@app.route("/api/results/")
@app.route("/api/status/")
@app.route("/api/results/<id>")
@app.route('/api/data/upload/', methods=['POST'])
@app.route('/api/data/', methods=['GET'])
@app.route('/api/tools/', methods=['GET'])
@app.route('/api/tools/upload/', methods=['POST'])
@app.route('/api/tools/<name>/', methods=['DELETE'])
@app.route('/api/data/<name>/', methods=['DELETE'])
if __name__ == "__main__":
app.run('0.0.0.0', 5000) | [
6738,
42903,
1330,
46947,
11,
2581,
11,
18261,
201,
198,
11748,
33918,
201,
198,
11748,
279,
4948,
25162,
201,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
201,
198,
6738,
275,
1559,
13,
17752,
62,
22602,
1330,
45514,
11,
15989,
2... | 2.425065 | 774 |
# -*- coding: utf-8 -*-
from square.api_helper import APIHelper
from square.http.api_response import ApiResponse
from square.api.base_api import BaseApi
from square.http.auth.o_auth_2 import OAuth2
class TerminalApi(BaseApi):
"""A Controller to access Endpoints in the square API."""
def create_terminal_checkout(self,
body):
"""Does a POST request to /v2/terminals/checkouts.
Creates a Terminal checkout request and sends it to the specified
device to take a payment
for the requested amount.
Args:
body (CreateTerminalCheckoutRequest): An object containing the
fields to POST for the request. See the corresponding object
definition for field details.
Returns:
ApiResponse: An object with the response value as well as other
useful information such as status codes and headers. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/terminals/checkouts'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
def search_terminal_checkouts(self,
body):
"""Does a POST request to /v2/terminals/checkouts/search.
Retrieves a filtered list of Terminal checkout requests created by the
account making the request.
Args:
body (SearchTerminalCheckoutsRequest): An object containing the
fields to POST for the request. See the corresponding object
definition for field details.
Returns:
ApiResponse: An object with the response value as well as other
useful information such as status codes and headers. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/terminals/checkouts/search'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
def get_terminal_checkout(self,
checkout_id):
"""Does a GET request to /v2/terminals/checkouts/{checkout_id}.
Retrieves a Terminal checkout request by `checkout_id`.
Args:
checkout_id (string): The unique ID for the desired
`TerminalCheckout`.
Returns:
ApiResponse: An object with the response value as well as other
useful information such as status codes and headers. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/terminals/checkouts/{checkout_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'checkout_id': {'value': checkout_id, 'encode': True}
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
def cancel_terminal_checkout(self,
checkout_id):
"""Does a POST request to /v2/terminals/checkouts/{checkout_id}/cancel.
Cancels a Terminal checkout request if the status of the request
permits it.
Args:
checkout_id (string): The unique ID for the desired
`TerminalCheckout`.
Returns:
ApiResponse: An object with the response value as well as other
useful information such as status codes and headers. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/terminals/checkouts/{checkout_id}/cancel'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'checkout_id': {'value': checkout_id, 'encode': True}
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
def create_terminal_refund(self,
body):
"""Does a POST request to /v2/terminals/refunds.
Creates a request to refund an Interac payment completed on a Square
Terminal.
Args:
body (CreateTerminalRefundRequest): An object containing the
fields to POST for the request. See the corresponding object
definition for field details.
Returns:
ApiResponse: An object with the response value as well as other
useful information such as status codes and headers. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/terminals/refunds'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
def search_terminal_refunds(self,
body):
"""Does a POST request to /v2/terminals/refunds/search.
Retrieves a filtered list of Interac Terminal refund requests created
by the seller making the request.
Args:
body (SearchTerminalRefundsRequest): An object containing the
fields to POST for the request. See the corresponding object
definition for field details.
Returns:
ApiResponse: An object with the response value as well as other
useful information such as status codes and headers. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/terminals/refunds/search'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
def get_terminal_refund(self,
terminal_refund_id):
"""Does a GET request to /v2/terminals/refunds/{terminal_refund_id}.
Retrieves an Interac Terminal refund object by ID.
Args:
terminal_refund_id (string): The unique ID for the desired
`TerminalRefund`.
Returns:
ApiResponse: An object with the response value as well as other
useful information such as status codes and headers. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/terminals/refunds/{terminal_refund_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'terminal_refund_id': {'value': terminal_refund_id, 'encode': True}
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
def cancel_terminal_refund(self,
terminal_refund_id):
"""Does a POST request to /v2/terminals/refunds/{terminal_refund_id}/cancel.
Cancels an Interac Terminal refund request by refund request ID if the
status of the request permits it.
Args:
terminal_refund_id (string): The unique ID for the desired
`TerminalRefund`.
Returns:
ApiResponse: An object with the response value as well as other
useful information such as status codes and headers. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/terminals/refunds/{terminal_refund_id}/cancel'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'terminal_refund_id': {'value': terminal_refund_id, 'encode': True}
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
6616,
13,
15042,
62,
2978,
525,
1330,
7824,
47429,
198,
6738,
6616,
13,
4023,
13,
15042,
62,
26209,
1330,
5949,
72,
31077,
198,
6738,
6616,
13,
15042,
13,
8... | 2.322189 | 6,431 |
#!/usr/bin/env python3
import argparse
import subprocess
import sys
import os
from pathlib import Path
import get_paths
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
651,
62,
6978,
82,
628,
628,
628,
198,
1... | 2.830508 | 59 |
"""
=================================
Parallel reconstruction using CSD
=================================
This example shows how to use parallelism (multiprocessing) using
``peaks_from_model`` in order to speedup the signal reconstruction
process. For this example will we use the same initial steps
as we used in :ref:`example_reconst_csd`.
Import modules, fetch and read data, apply the mask and calculate the response
function.
"""
import multiprocessing
from dipy.data import fetch_stanford_hardi, read_stanford_hardi
fetch_stanford_hardi()
img, gtab = read_stanford_hardi()
data = img.get_data()
from dipy.segment.mask import median_otsu
maskdata, mask = median_otsu(data, 3, 1, False,
vol_idx=range(10, 50), dilate=2)
from dipy.reconst.csdeconv import auto_response
response, ratio = auto_response(gtab, maskdata, roi_radius=10, fa_thr=0.7)
data = maskdata[:, :, 33:37]
mask = mask[:, :, 33:37]
"""
Now we are ready to import the CSD model and fit the datasets.
"""
from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
csd_model = ConstrainedSphericalDeconvModel(gtab, response)
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
"""
Compute the CSD-based ODFs using ``peaks_from_model``. This function has a
parameter called ``parallel`` which allows for the voxels to be processed in
parallel. If ``nbr_processes`` is None it will figure out automatically the
number of CPUs available in your system. Alternatively, you can set
``nbr_processes`` manually. Here, we show an example where we compare the
duration of execution with or without parallelism.
"""
import time
from dipy.direction import peaks_from_model
start_time = time.time()
csd_peaks_parallel = peaks_from_model(model=csd_model,
data=data,
sphere=sphere,
relative_peak_threshold=.5,
min_separation_angle=25,
mask=mask,
return_sh=True,
return_odf=False,
normalize_peaks=True,
npeaks=5,
parallel=True,
nbr_processes=None)
time_parallel = time.time() - start_time
print("peaks_from_model using " + str(multiprocessing.cpu_count())
+ " process ran in :" + str(time_parallel) + " seconds")
"""
``peaks_from_model`` using 8 processes ran in :114.425682068 seconds
"""
start_time = time.time()
csd_peaks = peaks_from_model(model=csd_model,
data=data,
sphere=sphere,
relative_peak_threshold=.5,
min_separation_angle=25,
mask=mask,
return_sh=True,
return_odf=False,
normalize_peaks=True,
npeaks=5,
parallel=False,
nbr_processes=None)
time_single = time.time() - start_time
print("peaks_from_model ran in :" + str(time_single) + " seconds")
"""
``peaks_from_model`` ran in :242.772505999 seconds
"""
print("Speedup factor : " + str(time_single / time_parallel))
"""
Speedup factor : 2.12166099088
In Windows if you get a runtime error about frozen executable please start
your script by adding your code above in a ``main`` function and use:
if __name__ == '__main__':
import multiprocessing
multiprocessing.freeze_support()
main()
"""
| [
37811,
198,
10052,
28,
198,
10044,
29363,
25056,
1262,
9429,
35,
198,
10052,
28,
198,
198,
1212,
1672,
2523,
703,
284,
779,
10730,
1042,
357,
16680,
541,
305,
919,
278,
8,
1262,
198,
15506,
431,
4730,
62,
6738,
62,
19849,
15506,
287,
... | 2.137143 | 1,750 |
from factory import Faker, make_factory
from factory.alchemy import SQLAlchemyModelFactory
from lms import models
from tests.factories.attributes import (
H_DISPLAY_NAME,
H_USERNAME,
OAUTH_CONSUMER_KEY,
RESOURCE_LINK_ID,
USER_ID,
)
GradingInfo = make_factory(
models.GradingInfo,
FACTORY_CLASS=SQLAlchemyModelFactory,
lis_result_sourcedid=Faker("numerify", text="test_lis_result_sourcedid_#"),
lis_outcome_service_url=Faker(
"numerify", text="https://example.com/test-lis-outcome-service-url-#"
),
oauth_consumer_key=OAUTH_CONSUMER_KEY,
user_id=USER_ID,
context_id=Faker("hexify", text="^" * 32),
resource_link_id=RESOURCE_LINK_ID,
tool_consumer_info_product_family_code=Faker(
"random_element",
elements=["BlackBoardLearn", "moodle", "canvas", "sakai", "desire2learn"],
),
h_username=H_USERNAME,
h_display_name=H_DISPLAY_NAME,
)
| [
6738,
8860,
1330,
376,
3110,
11,
787,
62,
69,
9548,
198,
6738,
8860,
13,
282,
26599,
1330,
16363,
2348,
26599,
17633,
22810,
198,
198,
6738,
300,
907,
1330,
4981,
198,
6738,
5254,
13,
22584,
1749,
13,
1078,
7657,
1330,
357,
198,
220,
... | 2.314214 | 401 |
# Generated by Django 3.0.4 on 2020-03-31 16:04
from django.db import migrations, models
import markdownx.models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
19,
319,
12131,
12,
3070,
12,
3132,
1467,
25,
3023,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
1317,
2902,
87,
13,
27530,
628
] | 2.948718 | 39 |
import json
import re
import threading
import websocket
from platypush.backend import Backend
from platypush.message.event.music import MusicPlayEvent, MusicPauseEvent, \
MusicStopEvent, NewPlayingTrackEvent, PlaylistChangeEvent, VolumeChangeEvent, \
PlaybackConsumeModeChangeEvent, PlaybackSingleModeChangeEvent, \
PlaybackRepeatModeChangeEvent, PlaybackRandomModeChangeEvent, \
MuteChangeEvent, SeekChangeEvent
# noinspection PyUnusedLocal
class MusicMopidyBackend(Backend):
"""
This backend listens for events on a Mopidy music server streaming port.
Since this backend leverages the Mopidy websocket interface it is only
compatible with Mopidy and not with other MPD servers. Please use the
:class:`platypush.backend.music.mpd.MusicMpdBackend` for a similar polling
solution if you're not running Mopidy or your instance has the websocket
interface or web port disabled.
Triggers:
* :class:`platypush.message.event.music.MusicPlayEvent` if the playback state changed to play
* :class:`platypush.message.event.music.MusicPauseEvent` if the playback state changed to pause
* :class:`platypush.message.event.music.MusicStopEvent` if the playback state changed to stop
* :class:`platypush.message.event.music.NewPlayingTrackEvent` if a new track is being played
* :class:`platypush.message.event.music.PlaylistChangeEvent` if the main playlist has changed
* :class:`platypush.message.event.music.VolumeChangeEvent` if the main volume has changed
* :class:`platypush.message.event.music.MuteChangeEvent` if the mute status has changed
* :class:`platypush.message.event.music.SeekChangeEvent` if a track seek event occurs
Requires:
* Mopidy installed and the HTTP service enabled
"""
@staticmethod
# vim:sw=4:ts=4:et:
| [
11748,
33918,
198,
11748,
302,
198,
11748,
4704,
278,
198,
198,
11748,
2639,
5459,
198,
198,
6738,
40315,
4464,
1530,
13,
1891,
437,
1330,
5157,
437,
198,
6738,
40315,
4464,
1530,
13,
20500,
13,
15596,
13,
28965,
1330,
7849,
11002,
9237... | 3.062603 | 607 |
from pkg import Foo
foo = Foo | [
6738,
279,
10025,
1330,
36080,
198,
198,
21943,
796,
36080
] | 3 | 10 |
#!/usr/bin/env python3
from mutagen.mp4 import MP4, MP4Cover
from pathlib import Path
import re
album_dir = Path('.')
artist, album, date = re.fullmatch('(.+) - (.+) \((\d{4})\)', album_dir.resolve().name).groups()
tracks = [p.name for p in album_dir.glob('*.m4a')]
total_tracks = len(tracks)
with open('cover.jpg', 'rb') as f:
cover = MP4Cover(f.read())
for track in tracks:
no, title = re.fullmatch('(\d{2})\.(.+)\.m4a', track).groups()
track_no = int(no)
mp4 = MP4(track)
mp4.tags['\xa9nam'] = title
mp4.tags['\xa9ART'] = artist
mp4.tags['\xa9alb'] = album
mp4.tags['\xa9day'] = date
mp4.tags['trkn'] = ((track_no, total_tracks),)
mp4.tags['covr'] = [cover]
mp4.save()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
4517,
11286,
13,
3149,
19,
1330,
4904,
19,
11,
4904,
19,
27245,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
302,
198,
198,
40916,
62,
15908,
796,
10644,
10786,
2637,
8,
... | 2.257053 | 319 |
from hyper.http20.huffman import HuffmanDecoder, HuffmanEncoder
from hyper.http20.huffman_constants import REQUEST_CODES,REQUEST_CODES_LENGTH
| [
6738,
8718,
13,
4023,
1238,
13,
71,
1648,
805,
1330,
14721,
805,
10707,
12342,
11,
14721,
805,
27195,
12342,
198,
6738,
8718,
13,
4023,
1238,
13,
71,
1648,
805,
62,
9979,
1187,
1330,
4526,
35780,
62,
34,
3727,
1546,
11,
2200,
35780,
... | 2.86 | 50 |
# Copyright 2018 Dinar Gabbasov <https://it-projects.info/team/GabbasovDinar>
# License MIT (https://opensource.org/licenses/MIT).
from odoo import fields, models
| [
2,
15069,
2864,
360,
22050,
402,
6485,
292,
709,
1279,
5450,
1378,
270,
12,
42068,
13,
10951,
14,
15097,
14,
38,
6485,
292,
709,
35,
22050,
29,
198,
2,
13789,
17168,
357,
5450,
1378,
44813,
1668,
13,
2398,
14,
677,
4541,
14,
36393,
... | 3.09434 | 53 |
from django.apps import AppConfig
class MusicPublisherConfig(AppConfig):
"""Configuration for Music Publisher app.
Attributes:
label (str): app label
name (str): app name
verbose_name (str): app verbose name
"""
name = 'music_publisher'
label = 'music_publisher'
verbose_name = 'Music Publisher'
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628,
198,
4871,
7849,
46471,
16934,
7,
4677,
16934,
2599,
628,
220,
220,
220,
37227,
38149,
329,
7849,
28045,
598,
13,
628,
220,
220,
220,
49213,
25,
198,
220,
220,
220,
220,
220,
220,... | 2.792 | 125 |
"""
script for data preprocessing
"""
import os
import shutil
import pandas as pd
from config.cfg import cfg
def process_gender_imgs():
"""
process gender images
:return:
"""
m_filenames, f_filenames = split_by_attribute('gender')
if not os.path.exists(os.path.join(cfg['gender_base_dir'], 'M')):
os.makedirs(os.path.join(cfg['gender_base_dir'], 'M'))
if not os.path.exists(os.path.join(cfg['gender_base_dir'], 'F')):
os.makedirs(os.path.join(cfg['gender_base_dir'], 'F'))
for m_f in m_filenames:
shutil.copy(m_f, os.path.join(cfg['gender_base_dir'], 'M', os.path.basename(m_f)))
for f_f in f_filenames:
shutil.copy(f_f, os.path.join(cfg['gender_base_dir'], 'F', os.path.basename(f_f)))
def process_race_imgs():
"""
process race images
:return:
"""
w_filenames, y_filenames = split_by_attribute('race')
if not os.path.exists(os.path.join(cfg['race_base_dir'], 'W')):
os.makedirs(os.path.join(cfg['race_base_dir'], 'W'))
if not os.path.exists(os.path.join(cfg['race_base_dir'], 'Y')):
os.makedirs(os.path.join(cfg['race_base_dir'], 'Y'))
for w_f in w_filenames:
shutil.copy(w_f, os.path.join(cfg['race_base_dir'], 'W', os.path.basename(w_f)))
for y_f in y_filenames:
shutil.copy(y_f, os.path.join(cfg['race_base_dir'], 'Y', os.path.basename(y_f)))
| [
37811,
198,
12048,
329,
1366,
662,
36948,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
4566,
13,
37581,
1330,
30218,
70,
628,
198,
198,
4299,
1429,
62,
8388,
62,... | 2.126524 | 656 |
"""
The static source provides a column for inserting static data. It's incredibly
simple and not terribly useful, but it can come in handy from time to time.
For example, if you want to fill a column with a 'coming soon...' message, you
could use a static column.
"""
from __future__ import absolute_import
from blingalytics import sources
class Value(StaticColumn):
"""
Returns a given value for each row in the report. In addition to the
standard column options, it takes one positional argument, which is the
static value to return for every row.
"""
| [
37811,
198,
464,
9037,
2723,
3769,
257,
5721,
329,
19319,
9037,
1366,
13,
632,
338,
8131,
198,
36439,
290,
407,
22121,
4465,
11,
475,
340,
460,
1282,
287,
15728,
422,
640,
284,
640,
13,
198,
1890,
1672,
11,
611,
345,
765,
284,
6070,... | 3.879195 | 149 |
from spacy_transformers import TransformerData
import srsly
| [
6738,
599,
1590,
62,
35636,
364,
1330,
3602,
16354,
6601,
198,
11748,
264,
3808,
306,
628
] | 3.8125 | 16 |
""" Video playback from sdcard/pico flash/eve's connected flash example """
import sys
import video2 as demo
if __name__ == "__main__":
sdcard, eve = eve_init()
eve.calibrate()
while 1:
demo.start(sdcard, eve) | [
37811,
7623,
16388,
422,
45647,
9517,
14,
79,
3713,
7644,
14,
44655,
338,
5884,
7644,
1672,
37227,
198,
198,
11748,
25064,
198,
11748,
2008,
17,
355,
13605,
198,
220,
220,
220,
220,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
8... | 2.526316 | 95 |
# -*- coding: utf-8 -*-
from ckan.cli.cli import ckan
def test_build_and_clean(cli, ckan_config, tmpdir, monkeypatch):
"""After build, there are some folders with assets inside
`%(ckan.storage_path)/webassets`. And after cleaning they must
be empty.
"""
monkeypatch.setitem(ckan_config, u'ckan.storage_path', str(tmpdir))
cli.invoke(ckan, [u'asset', u'build'])
assert len(tmpdir.listdir()) == 1
webassets_folder = tmpdir.listdir()[0]
assert webassets_folder.basename == u'webassets'
for folder in webassets_folder.listdir():
if not folder.isdir():
continue
assert folder.listdir()
cli.invoke(ckan, [u'asset', u'clean'])
for folder in webassets_folder.listdir():
if not folder.isdir():
continue
assert not folder.listdir()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
6738,
269,
27541,
13,
44506,
13,
44506,
1330,
269,
27541,
628,
198,
4299,
1332,
62,
11249,
62,
392,
62,
27773,
7,
44506,
11,
269,
27541,
62,
11250,
11,
45218,
159... | 2.447059 | 340 |
"""Pydantic can convert a model instance to a dictionary, or json string"""
from pydantic_lib.utils import Movie
movie = Movie(title='Friday the 13th', year_of_release=1980, rating='R', box_office='U$93M')
movie_as_dict = movie.dict(exclude={'cast'})
movie_as_json = movie.json(exclude={'cast'})
print(movie_as_dict)
print(movie_as_json)
| [
37811,
47,
5173,
5109,
460,
10385,
257,
2746,
4554,
284,
257,
22155,
11,
393,
33918,
4731,
37811,
198,
6738,
279,
5173,
5109,
62,
8019,
13,
26791,
1330,
15875,
198,
198,
41364,
796,
15875,
7,
7839,
11639,
20610,
262,
1511,
400,
3256,
... | 2.865546 | 119 |
from collections import Counter, defaultdict
import csv
import requests
CSV_URL = 'https://raw.githubusercontent.com/pybites/SouthParkData/master/by-season/Season-{}.csv' # noqa E501
def get_season_csv_file(season):
"""Receives a season int, and downloads loads in its
corresponding CSV_URL"""
with requests.Session() as s:
download = s.get(CSV_URL.format(season))
return download.content.decode('utf-8')
def get_num_words_spoken_by_character_per_episode(content):
"""Receives loaded csv content (str) and returns a dict of
keys=characters and values=Counter object,
which is a mapping of episode=>words spoken"""
d = defaultdict(Counter)
reader_list = csv.DictReader(content.splitlines())
for row in reader_list:
words = row['Line'].strip().split()
d[row['Character']][row['Episode']] += len(words)
return d
| [
6738,
17268,
1330,
15034,
11,
4277,
11600,
198,
11748,
269,
21370,
198,
198,
11748,
7007,
198,
198,
7902,
53,
62,
21886,
796,
705,
5450,
1378,
1831,
13,
12567,
43667,
13,
785,
14,
9078,
2545,
274,
14,
14942,
25478,
6601,
14,
9866,
14,... | 2.77709 | 323 |