id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
127371 | <filename>secret.py
# Change these to your instagram login credentials.
# NOTE(review): keep this file out of version control (e.g. add it to
# .gitignore) — committing real credentials here would leak them.
username = "Username"  # Instagram account name (placeholder)
password = "Password"  # Instagram account password (placeholder)
| StarcoderdataPython |
1733504 | #!/usr/bin/env python2
import fileinput
import re
# Number of spaces that make up one indentation level.
TAB_LENGTH = 4


def prependTabs(string, numTabs=1):
    """Return *string* prefixed with *numTabs* levels of space indentation."""
    indent = ' ' * (TAB_LENGTH * numTabs)
    return indent + string
def getVariables(string):
    """Extract every PHP-style variable token (e.g. ``$foo``) from *string*."""
    php_variable = re.compile(r'\$[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*')
    return php_variable.findall(string)
def createVariableDeclaration(variable):
    """Build an indented PHP ``private`` property declaration for *variable*."""
    declaration = "private {};".format(variable)
    return prependTabs(declaration)
def getVariableDeclarations(variables):
    """Return a declaration line for each variable, each followed by a blank line."""
    lines = []
    for variable in variables:
        lines.extend([createVariableDeclaration(variable), ''])
    return lines
def createPropertyInit(variable):
    """Build a constructor body line assigning *variable* to its property."""
    property_name = variable.replace('$', '')
    assignment = "$this->{0} = {1};".format(property_name, variable)
    return prependTabs(assignment, 2)
def getConstructVariables(variables):
    """Wrap the property-initialisation lines in an indented ``{ ... }`` body."""
    body = [createPropertyInit(variable) for variable in variables]
    return [prependTabs('{')] + body + [prependTabs('}')]
def finishConstruct(construct):
    """Expand a PHP __construct signature into property declarations plus a
    body that assigns each constructor argument to its property."""
    variables = getVariables(construct)
    lines = list(getVariableDeclarations(variables))
    lines.append(prependTabs(construct.strip()))
    lines.extend(getConstructVariables(variables))
    return lines
def main():
    # Read the first line fed to the script (stdin or a file argument);
    # it is expected to hold a PHP __construct signature.
    construct = fileinput.input()[0];
    finished = finishConstruct(construct)
    for line in finished:
        # Python 2 print statement — see the `#!/usr/bin/env python2` shebang.
        print line


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1755081 | from .etl_helper import ETLHelper
from .neo4j_helper import Neo4jHelper
from .assembly_sequence_helper import AssemblySequenceHelper
from .obo_helper import OBOHelper
from .resource_descriptor_helper_2 import ResourceDescriptorHelper2
from .text_processing_helper import TextProcessingHelper
| StarcoderdataPython |
3393321 | """ Example showing how to use visual search APIs via python SDK.
"""
from pprint import pprint
from props import *
from cfapivisualsearch.sdk import VisualSearch
#------------------------------------------------------------------------------
# Initialize.
#------------------------------------------------------------------------------
# Endpoint, credentials and API version come from the local props module.
vs = VisualSearch(api_gateway_url=props['api_gateway_url'],
                  api_key=props['api_key'],
                  version=props['api_version'],
                  data_collection_opt_out=props['data_collection_opt_out'])
# Check if the url and API key is valid.
status,response = vs.fashion_quote()
print(status)
pprint(response)
#------------------------------------------------------------------------------
# VISUAL SEARCH CATEGORIES PREDICTION
#------------------------------------------------------------------------------
# The triple-quoted blocks below are intentionally inert example code for the
# predict / status / delete calls; they are not executed.
"""
#Predict the visual search categories based on product images
status,response = vs.categories_predict(catalog_name=props['catalog_name'],
clear_cache=True,
ignore_non_primary_images=False,
visual_search_categories_threshold=0.0)
# Get the status of the visual search categories prediction
status,response = vs.categories_status(catalog_name=props['catalog_name'])
pprint(response)
#Delete the visual search categories prediction process
status,response = vs.categories_delete(catalog_name=props['catalog_name'])
pprint(response)
"""
# Get all visual search categories
status,response = vs.visual_search_categories(catalog_name=props['catalog_name'])
pprint(response)
# Get all visual browse categories
status,response = vs.visual_browse_categories(catalog_name=props['catalog_name'])
pprint(response)
#------------------------------------------------------------------------------
# BUILD VISUAL SEARCH INDEX
#------------------------------------------------------------------------------
"""
# Build the visual search index.
status,response = vs.index_build(catalog_name=props['catalog_name'],
per_category_index=False,
full_index=True,
group_by=False,
group_by_k=5)
pprint(response)
# Get the status of the visual search index.
status,response = vs.index_status(catalog_name=props['catalog_name'])
pprint(response)
# Delete the visual search index.
status,response = vs.index_delete(catalog_name=props['catalog_name'])
pprint(response)
"""
#------------------------------------------------------------------------------
# VISUAL BROWSE
#------------------------------------------------------------------------------
# Get other visually similar products in the catalog.
status,response = vs.browse(catalog_name=props['catalog_name'],
                            id='ABOFA15AMCWJG10449',
                            image_id='1',
                            max_number_of_results=5,
                            per_category_index=True,
                            category=None,
                            use_cache=False,
                            sort_option='visual_similarity',
                            unique_products=True)
pprint(response)
#------------------------------------------------------------------------------
# VISUAL SEARCH
#------------------------------------------------------------------------------
# Get visually similar products in the catalog for an uploaded image.
status,response = vs.search(catalog_name=props['catalog_name'],
                            image_filename='test_image.jpeg',
                            max_number_of_results=5,
                            per_category_index=True,
                            category=None,
                            visual_search_categories_threshold=0.0,
                            sort_option='visual_similarity',
                            group_by=None,
                            unique_products=True)
pprint(response)
| StarcoderdataPython |
3230972 | from boiler import bootstrap
from boiler.config import TestingConfig
from shiftuser.config import UserConfig
"""
Create app for testing
This is not a real application, we only use it to run tests against.
"""
class Config(TestingConfig, UserConfig):
USER_JWT_SECRET = 'typically will come from environment'
SECRET_KEY = 'supersecret'
# create app
app = bootstrap.create_app(__name__, config=Config(),)
bootstrap.add_orm(app)
bootstrap.add_mail(app)
bootstrap.add_routing(app)
| StarcoderdataPython |
try:
    from meu_grafo import MeuGrafo
except Exception as e:
    # Best-effort import: on failure the error is printed and a NameError
    # will surface below when MeuGrafo is first used.
    print(e)

# Paraiba graph
g_p = MeuGrafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])
g_p.adicionaAresta('a1', 'J', 'C')
g_p.adicionaAresta('a2', 'C', 'E')
g_p.adicionaAresta('a3', 'C', 'E')
g_p.adicionaAresta('a4', 'P', 'C')
g_p.adicionaAresta('a5', 'P', 'C')
g_p.adicionaAresta('a6', 'T', 'C')
g_p.adicionaAresta('a7', 'M', 'C')
g_p.adicionaAresta('a8', 'M', 'T')
g_p.adicionaAresta('a9', 'T', 'Z')

# Paraiba graph without parallel edges
g_p_sem_paralelas = MeuGrafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])
g_p_sem_paralelas.adicionaAresta('a1', 'J', 'C')
g_p_sem_paralelas.adicionaAresta('a2', 'C', 'E')
g_p_sem_paralelas.adicionaAresta('a3', 'P', 'C')
g_p_sem_paralelas.adicionaAresta('a4', 'T', 'C')
g_p_sem_paralelas.adicionaAresta('a5', 'M', 'C')
g_p_sem_paralelas.adicionaAresta('a6', 'M', 'T')
g_p_sem_paralelas.adicionaAresta('a7', 'T', 'Z')

# Complete graphs
g_c = MeuGrafo(['J', 'C', 'E', 'P'])
g_c.adicionaAresta('a1','J','C')
g_c.adicionaAresta('a2', 'J', 'E')
g_c.adicionaAresta('a3', 'J', 'P')
g_c.adicionaAresta('a4', 'E', 'C')
g_c.adicionaAresta('a5', 'P', 'C')
g_c.adicionaAresta('a6', 'P', 'E')

# Non-directed graphs
g_nd = MeuGrafo(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'])
g_nd.adicionaAresta('a1', 'A', 'B')
g_nd.adicionaAresta('a2', 'A', 'G')
g_nd.adicionaAresta('a3', 'A', 'J')
g_nd.adicionaAresta('a4', 'G', 'K')
g_nd.adicionaAresta('a5', 'J', 'K')
g_nd.adicionaAresta('a6', 'G', 'J')
g_nd.adicionaAresta('a7', 'I', 'J')
g_nd.adicionaAresta('a8', 'G', 'I')
g_nd.adicionaAresta('a9', 'G', 'H')
g_nd.adicionaAresta('a10', 'F', 'H')
g_nd.adicionaAresta('a11', 'B', 'F')
g_nd.adicionaAresta('a12', 'B', 'G')
g_nd.adicionaAresta('a13', 'B', 'C')
g_nd.adicionaAresta('a14', 'C', 'D')
g_nd.adicionaAresta('a15', 'D', 'E')
g_nd.adicionaAresta('a16', 'B', 'D')
g_nd.adicionaAresta('a17', 'B', 'E')
| StarcoderdataPython |
# Write your solution for 1.2 here!
# Sum of the even numbers in 0..100 (inclusive).  Using sum() over a stepped
# range avoids the original's accumulator named `sum`, which shadowed the
# built-in of the same name.
total = sum(range(0, 101, 2))
print(total)
1685952 | <reponame>enhatem/quadrotor_mpc_acados
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
def trajectory_generator(T_final, N, traj=0, show_traj=False):
    '''
    Generate a reference trajectory sampled at N points.

    Args:
        T_final (float): final time; timestamps span [0, T_final].
        N (int): number of samples.
        traj (int): 0 -> circle at constant altitude z=1,
                    1 -> helix climbing from z=1 to z=2.
        show_traj (bool): if True, plot the trajectory (requires matplotlib).

    Returns:
        tuple: (t, x, y, z) arrays, each of length N.

    Raises:
        ValueError: if traj is neither 0 nor 1.  (The original code left
        t/x/y/z undefined in that case and crashed with a NameError at the
        return statement.)
    '''
    r = 0.5                    # radius of the circle/helix
    c_x, c_y = 0, 0            # center coordinates in the x-y plane
    th = np.linspace(0, 2 * np.pi, N)

    # Timestamps and the circular x/y path are common to both trajectory
    # types, so compute them once instead of per-branch.
    t = np.linspace(0, T_final, N)
    x = r * np.cos(th) + c_x
    y = r * np.sin(th) + c_y

    if traj == 0:
        # circular trajectory: constant altitude
        z = np.ones_like(th)
    elif traj == 1:
        # hellical trajectory: altitude ramps from 1 to 2
        z = np.linspace(1, 2, N)
    else:
        raise ValueError("traj must be 0 (circle) or 1 (helix), got %r" % (traj,))

    if show_traj == True:
        plt.figure()
        ax = plt.axes(projection = "3d")
        plt.title('Reference trajectory')
        ax.plot3D(x, y, z)
        ax.set_xlabel("x[m]")
        ax.set_ylabel("y[m]")
        ax.set_zlabel("z[m]")
        plt.show()

    return t, x, y, z
3385728 | #!/usr/bin/env python3
import numpy as np
import scipy.linalg
try:
import lib.metrics as metrics
except ModuleNotFoundError:
import metrics
__all__ = ["OLSRegression", "RidgeRegression", "LassoRegression"]
class __RegBackend:
    """Backend class in case we want to run with either scipy, numpy
    (or something else)."""

    # Flipped to True by subclasses once fit() has run successfully.
    _fit_performed = False

    __possible_backends = ["numpy", "scipy"]
    __possible_inverse_methods = ["inv", "svd"]

    def __init__(self, linalg_backend="scipy", inverse_method="svd"):
        """Sets up the linalg backend.

        Args:
            linalg_backend (str): "numpy" or "scipy".
            inverse_method (str): "inv" (direct inverse) or "svd".
        """
        assert linalg_backend in self.__possible_backends, \
            "{:s} backend not recognized".format(str(linalg_backend))
        self.linalg_backend = linalg_backend

        assert inverse_method in self.__possible_inverse_methods, \
            "{:s} inverse method not recognized".format(str(inverse_method))
        self.inverse_method = inverse_method

    def fit(self, X_train, y_train):
        """Abstract hook: subclasses implement the actual regression fit."""
        raise NotImplementedError("Derived class missing fit()")

    def _inv(self, M):
        """Invert matrix M with the configured backend and method.

        NOTE(review): the "svd" branch returns ``U @ diag(1/S) @ VH``, which
        equals the inverse only for symmetric M (where VH == U.T).  The only
        callers invert X^T X, which is symmetric, so this holds here — but
        the helper is not a general-purpose matrix inverse.
        """
        if self.linalg_backend == "numpy":
            if self.inverse_method == "inv":
                return np.linalg.inv(M)
            elif self.inverse_method == "svd":
                U, S, VH = np.linalg.svd(M)
                S = np.diag(1.0/S)
                return U @ S @ VH
        elif self.linalg_backend == "scipy":
            if self.inverse_method == "inv":
                return scipy.linalg.inv(M)
            elif self.inverse_method == "svd":
                U, S, VH = scipy.linalg.svd(M)
                S = np.diag(1.0/S)
                return U @ S @ VH

    def _check_if_fitted(self):
        """Small check if fit has been performed."""
        assert self._fit_performed, "Fit not performed"

    def score(self, y_true, y_test):
        """Returns the R^2 score.

        Args:
            y_true (ndarray): ground-truth target values.
            y_test (ndarray): predicted target values.

        Returns:
            float: R2 score.
        """
        return metrics.R2(y_true, y_test)

    def beta_variance(self):
        """Returns the variance of beta."""
        self._check_if_fitted()
        return self.coef_var

    def get_y_variance(self):
        # Only available after a subclass fit() has estimated sigma^2.
        if hasattr(self, "y_variance"):
            return self.y_variance
        else:
            raise AttributeError(
                ("Class {:s} does not contain "
                 "y_variance.".format(self.__class__)))

    def predict(self, X_test):
        """Performs a prediction for given beta coefs.

        Args:
            X_test (ndarray): test samples, size (N, p - 1)

        Returns:
            ndarray: test values for X_test
        """
        self._check_if_fitted()
        return X_test @ self.coef

    def get_results(self):
        """Method for retrieving results from fit.

        Returns:
            y_approx (ndarray): y approximated on training data x.
            beta (ndarray): the beta fit parameters.
            beta_cov (ndarray): covariance matrix of the beta values.
            beta_var (ndarray): variance of the beta values.
            eps (ndarray): the residues of y_train and y_approx.
        """
        return self.y_approx, self.coef, self.coef_cov, self.coef_var, self.eps

    # sklearn-style aliases backed by plain attributes.  The extra
    # @...getter decorators merely re-register the same getter; harmless.
    @property
    def coef_(self):
        return self.coef

    @coef_.getter
    def coef_(self):
        return self.coef

    @coef_.setter
    def coef_(self, value):
        self.coef = value

    @property
    def coef_var(self):
        return self.beta_coefs_var

    @coef_var.getter
    def coef_var(self):
        return self.beta_coefs_var

    @coef_var.setter
    def coef_var(self, value):
        self.beta_coefs_var = value
class OLSRegression(__RegBackend):
    """
    Ordinary least squares linear regression via the normal equations.

    Fits p features over N samples.
    """

    def __init__(self, **kwargs):
        """Set up the OLS solver.

        Args:
            linalg_backend (str): optional, "numpy" or "scipy".
            inverse_method (str): optional, "inv" or "svd".
        """
        super().__init__(**kwargs)
        self.X_train = None
        self.y_train = None

    def fit(self, X_train, y_train):
        """Fit y_train against the design matrix X_train = [1, x, x^2, ...].

        Args:
            X_train (ndarray): design matrix, shape (N, p).
            y_train (ndarray): target values, shape (N,).
        """
        self.X_train = X_train
        self.y_train = y_train
        self.N, self.P = X_train.shape

        # Normal equations: beta = (X^T X)^{-1} X^T y
        gram = X_train.T @ X_train
        gram_inv = self._inv(gram)
        beta = gram_inv @ X_train.T @ y_train
        self.XTX = gram
        self.XTX_inv = gram_inv
        self.coef = beta

        # Fitted values and residuals.
        self.y_approx = X_train @ beta
        self.eps = y_train - self.y_approx

        # Unbiased estimate of the noise variance sigma^2.
        self.y_variance = np.sum(self.eps**2) / (self.N - self.P - 1)

        # Covariance of beta: (X^T X)^{-1} * sigma^2.
        self.coef_cov = gram_inv * self.y_variance
        self.coef_var = np.diag(self.coef_cov)

        self._fit_performed = True
class RidgeRegression(__RegBackend):
    """
    Ridge (L2-regularised) linear regression.
    """

    def __init__(self, alpha=1.0, **kwargs):
        """Set up the Ridge solver.

        Args:
            alpha (float): regularisation strength (lambda).
        """
        super().__init__(**kwargs)
        self.alpha = alpha
        self.X_train = None
        self.y_train = None

    def fit(self, X_train, y_train):
        """Fit y_train against X_train with an L2 penalty on beta.

        Args:
            X_train (ndarray): design matrix, shape (N, p).
            y_train (ndarray): target values, shape (N, 1).
        """
        self.X_train = X_train
        self.y_train = y_train
        self.N, self.P = X_train.shape

        # Regularised normal equations: beta = (X^T X + alpha*I)^{-1} X^T y
        gram = X_train.T @ X_train
        regularized = gram + self.alpha * np.eye(gram.shape[0])
        regularized_inv = self._inv(regularized)
        beta = regularized_inv @ X_train.T @ y_train
        self.XTX = gram
        self.XTX_aI = regularized
        self.XTX_aI_inv = regularized_inv
        self.coef = beta

        # Fitted values and residuals.
        self.y_approx = X_train @ beta
        self.eps = y_train - self.y_approx

        # Unbiased estimate of the noise variance sigma^2.
        self.y_variance = np.sum(self.eps**2) / (self.N - self.P - 1)

        # Sandwich covariance of the ridge estimator; see section 1.4 of
        # https://arxiv.org/pdf/1509.09169.pdf
        self.coef_cov = regularized_inv @ gram @ regularized_inv.T
        self.coef_cov *= self.y_variance
        self.coef_var = np.diag(self.coef_cov)

        self._fit_performed = True
class LassoRegression(__RegBackend):
    """
    An implementation of lasso regression.
    """

    def __init__(self, alpha=1.0, **kwargs):
        """A method for Lasso Regression.

        Args:
            alpha (float): alpha/lambda to use in Lasso Regression.
        """
        super().__init__(**kwargs)
        self.alpha = alpha
        self.X_train = None
        self.y_train = None
        # Not implemented yet: constructing an instance always raises.
        raise NotImplementedError

    def fit(self, X_train, y_train):
        # Placeholder until a lasso solver (e.g. coordinate descent) is added.
        raise NotImplementedError
def __test_ols_regression(x, y, deg):
    """Smoke-test OLSRegression on a polynomial design of degree *deg*."""
    print("Testing OLS for degree={}".format(deg))
    import sklearn.preprocessing as sk_preproc
    poly = sk_preproc.PolynomialFeatures(degree=deg, include_bias=True)
    X = poly.fit_transform(x, y)
    reg = OLSRegression()
    reg.fit(X, y)
    print("R^2: {}".format(reg.score(y, reg.predict(X))))


def __test_ridge_regression(x, y, deg, alpha=1.0):
    """Smoke-test RidgeRegression for a given degree and alpha."""
    print("Testing Ridge for degree={} for alpha={}".format(deg, alpha))
    import sklearn.preprocessing as sk_preproc
    poly = sk_preproc.PolynomialFeatures(degree=deg, include_bias=True)
    X = poly.fit_transform(x, y)
    reg = RidgeRegression(alpha=alpha)
    reg.fit(X, y)
    print("R^2: {}".format(reg.score(y, reg.predict(X))))


def __test_regresssions():
    """Run the OLS and Ridge smoke tests on noisy synthetic data."""
    # (sic: the misspelled name is kept because the __main__ guard calls it.)
    n = 100  # n cases, i = 0,1,2,...n-1
    deg = 8
    noise_strength = 0.1
    np.random.seed(1)
    x = np.random.rand(n, 1)
    y = 5.0*x*x + np.exp(-x*x) + noise_strength*np.random.randn(n, 1)
    __test_ols_regression(x, y, deg)
    for alpha_ in [1e-4, 1e-3, 1e-2, 1e-1, 1e1, 1e2]:
        __test_ridge_regression(x, y, deg, alpha_)


if __name__ == '__main__':
    __test_regresssions()
| StarcoderdataPython |
4816952 | <gh_stars>10-100
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import json
import os
import tensorflow as tf
from util.string_utils import natural_keys
from util.utils import AttrDict
def tf_open_file_in_path(path, filename_in_path, mode='w'):
    """Open ``path/filename_in_path`` with *mode*, first creating *path* when
    neither the file nor the directory exists yet."""
    filepath = os.path.join(path, filename_in_path)
    if not tf.gfile.Exists(filepath) and not tf.gfile.Exists(path):
        tf.gfile.MakeDirs(path)
    return tf.gfile.GFile(filepath, mode)
def load_json_as_attrdict(json_file):
    # Parse a JSON file (opened through tf_open_file_in_path) into an
    # AttrDict so keys are reachable as attributes.
    return json.load(tf_open_file_in_path("", json_file, "r"), object_pairs_hook=AttrDict)
### Code below originated from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/utils.py
### Modified by thnkinbtfly
def archive_ckpt(ckpt_eval_result_dict, ckpt_objective, ckpt_dir, keep_archives=2):
    """Archive a checkpoint and ckpt before, if the metric is better.

    Copies the newest checkpoint (plus eval results) into ``ckpt_dir/archive``
    and the oldest still-available checkpoint into
    ``ckpt_dir/archive_oldest_available``, then records the new best objective.

    Returns:
        bool: True if the objective improved and both archives were updated.
    """
    archive_dir = 'archive'
    archive_oldest_available_dir = 'archive_oldest_available'
    saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt')
    # Bail out early unless the new objective beats the stored best.
    if not check_is_improved(ckpt_objective, saved_objective_path):
        return False

    all_ckpts_available = get_ckpt_old_to_new(ckpt_dir)
    latest_ckpt = all_ckpts_available[-1]
    if not update_one_ckpt_and_remove_old_ones(latest_ckpt, os.path.join(ckpt_dir, archive_dir),
                                               keep_archives, ckpt_eval_result_dict):
        return False

    oldest_ckpt_available = all_ckpts_available[0]
    if not update_one_ckpt_and_remove_old_ones(oldest_ckpt_available,
                                               os.path.join(ckpt_dir, archive_oldest_available_dir),
                                               keep_archives):
        return False

    # Update the best objective.
    with tf.gfile.GFile(saved_objective_path, 'w') as f:
        f.write('%f' % ckpt_objective)
    return True
def check_is_improved(ckpt_objective, saved_objective_path):
    """Return True unless a previously saved objective beats *ckpt_objective*."""
    best_so_far = float('-inf')
    if tf.gfile.Exists(saved_objective_path):
        with tf.gfile.GFile(saved_objective_path, 'r') as f:
            best_so_far = float(f.read())
    if best_so_far > ckpt_objective:
        tf.logging.info('Ckpt %s is worse than %s', ckpt_objective, best_so_far)
        return False
    return True
def get_ckpt_old_to_new(target_dir):
    """Return checkpoint paths sorted oldest-to-newest; [] if none exist."""
    ckpt_state = tf.train.get_checkpoint_state(target_dir)
    all_ckpts = []
    if ckpt_state:
        all_ckpts = sorted(ckpt_state.all_model_checkpoint_paths,
                           key=natural_keys, reverse=False)
    tf.logging.info('got all_model_ckpt_paths %s' % str(all_ckpts))
    return all_ckpts
def update_one_ckpt_and_remove_old_ones(ckpt_name_path, dst_dir, num_want_to_keep_ckpts, ckpt_eval_result_dict=""):
    """Copy one checkpoint into *dst_dir*, pruning older archives there first.

    :param ckpt_eval_result_dict: optional evaluation information; when given
        it is written to ``dst_dir/best_eval.txt``.
    :return: False if no files exist for *ckpt_name_path*, else True.
    """
    filenames = tf.gfile.Glob(ckpt_name_path + '.*')
    if filenames is None:
        tf.logging.info('No files to copy for checkpoint %s', ckpt_name_path)
        return False
    tf.gfile.MakeDirs(dst_dir)
    # Keep one slot free for the checkpoint being copied in below.
    num_want_to_keep_prev_ckpts = num_want_to_keep_ckpts - 1
    remaining_ckpts = remove_old_ckpts_and_get_remaining_names(
        dst_dir, num_want_to_keep_ckpts=num_want_to_keep_prev_ckpts)
    write_ckpts(ckpt_name_path, dst_dir, remaining_ckpts)
    if ckpt_eval_result_dict:
        with tf.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f:
            f.write('%s' % ckpt_eval_result_dict)
    return True
def remove_old_ckpts_and_get_remaining_names(dst_dir, num_want_to_keep_ckpts):
    """Delete all but the newest *num_want_to_keep_ckpts* checkpoints in *dst_dir*.

    Args:
        dst_dir: directory whose archived checkpoints are pruned.
        num_want_to_keep_ckpts: how many of the newest checkpoints to keep.

    Returns:
        list: paths of the checkpoints that remain (oldest to newest).
    """
    # get_checkpoint_state returns absolute paths; refer to
    # https://git.codingcafe.org/Mirrors/tensorflow/tensorflow/commit/2843a7867d51c2cf065b85899ea0b9564e4d9db9
    all_ckpts = get_ckpt_old_to_new(dst_dir)
    if not all_ckpts:
        return []
    # Handle n <= 0 explicitly: the original slices used [:-0] == [:0], which
    # silently removed nothing and kept every old checkpoint when the caller
    # asked to keep zero.
    if num_want_to_keep_ckpts <= 0:
        want_to_rm_ckpts = all_ckpts
        remaining_ckpts = []
    else:
        want_to_rm_ckpts = all_ckpts[:-num_want_to_keep_ckpts]
        remaining_ckpts = all_ckpts[-num_want_to_keep_ckpts:]
    for want_to_rm_ckpt in want_to_rm_ckpts:
        for f in tf.gfile.Glob(want_to_rm_ckpt + "*"):
            tf.logging.info('Removing checkpoint %s', f)
            tf.gfile.Remove(f)
    return remaining_ckpts
def write_ckpts(ckpt_path, dst_dir, remaining_ckpts):
    """Copy every file of checkpoint *ckpt_path* into *dst_dir* and rewrite the
    ``checkpoint`` state file there so it lists *remaining_ckpts*."""
    filenames = tf.gfile.Glob(ckpt_path + '.*')
    tf.logging.info('Copying checkpoint %s to %s', ckpt_path, dst_dir)
    for f in filenames:
        dest = os.path.join(dst_dir, os.path.basename(f))
        tf.gfile.Copy(f, dest, overwrite=True)
    ckpt_state = tf.train.generate_checkpoint_state_proto(
        dst_dir,
        model_checkpoint_path=ckpt_path,
        all_model_checkpoint_paths=remaining_ckpts)
    with tf.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f:
        str_ckpt_state = str(ckpt_state)
        # Strips relative "../" segments from the serialized state.
        # NOTE(review): presumably so the recorded paths stay resolvable when
        # read back from dst_dir — confirm against the training loop.
        str_ckpt_state = str_ckpt_state.replace('../', '')
        tf.logging.info('str_ckpt_state %s' % str_ckpt_state)
        f.write(str_ckpt_state)
| StarcoderdataPython |
22276 | ##
## Evaluation Script
##
import numpy as np
import time
from sample_model import Model
from data_loader import data_loader
from generator import Generator
def evaluate(label_indices = {'brick': 0, 'ball': 1, 'cylinder': 2},
             channel_means = np.array([147.12697, 160.21092, 167.70029]),
             data_path = '../data',
             minibatch_size = 32,
             num_batches_to_test = 10,
             checkpoint_dir = 'tf_data/sample_model'):
    """Evaluate the sample model on random test minibatches.

    Returns:
        float: classification accuracy over the sampled batches, rounded
        to four decimals.

    NOTE(review): the dict/ndarray defaults are mutable objects shared across
    calls; safe only as long as callers never mutate them.
    """
    print("1. Loading data")
    data = data_loader(label_indices = label_indices,
                       channel_means = channel_means,
                       train_test_split = 0.5,
                       data_path = data_path)

    print("2. Instantiating the model")
    M = Model(mode = 'test')

    # Evaluate on test images:
    GT = Generator(data.test.X, data.test.y, minibatch_size = minibatch_size)

    num_correct = 0
    num_total = 0
    print("3. Evaluating on test images")
    for i in range(num_batches_to_test):
        GT.generate()
        yhat = M.predict(X = GT.X, checkpoint_dir = checkpoint_dir)
        # Count argmax agreement between predictions and one-hot labels.
        correct_predictions = (np.argmax(yhat, axis = 1) == np.argmax(GT.y, axis = 1))
        num_correct += np.sum(correct_predictions)
        num_total += len(correct_predictions)

    accuracy = round(num_correct/num_total,4)
    return accuracy
def calculate_score(accuracy):
    """Map an accuracy in [0, 1] to an integer grade between 4 and 10."""
    # (threshold, score) bands, checked from the highest downwards.
    bands = [(0.92, 10), (0.9, 9), (0.85, 8), (0.8, 7), (0.75, 6), (0.70, 5)]
    for threshold, score in bands:
        if accuracy >= threshold:
            return score
    # Anything below the lowest band earns the floor grade.
    return 4
if __name__ == '__main__':
    # Time the full evaluation run and report accuracy plus the derived score.
    program_start = time.time()
    accuracy = evaluate()
    score = calculate_score(accuracy)
    program_end = time.time()
    total_time = round(program_end - program_start,2)
    print()
    print("Execution time (seconds) = ", total_time)
    print('Accuracy = ' + str(accuracy))
    print("Score = ", score)
    print()
| StarcoderdataPython |
3233811 | #! /usr/bin/env python3
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import logging
import sys
import math
from aaop import AAOPFile
def to_obj(args, infile, outfile):
    """Write the AAOP cylindrical mesh in *infile* to *outfile* as a
    Wavefront OBJ model.

    Each stack contributes one ring of vertices (spoke angle -> x/y, stack
    height -> z); consecutive rings are stitched with quad faces.  With
    ``args.enclose`` the first and last rings are closed with n-gon caps.
    """
    for c in infile.comments:
        outfile.write('# {c}\n'.format(c=c))

    # Write all vertexes
    for stack in range(infile.num_stacks):
        outfile.write('# Stack {stack:d}\n'.format(stack=stack))
        for spoke in range(infile.num_spokes):
            # Convert polar (rho, phi) to cartesian x/y for this ring.
            phi = 2 * math.pi * spoke / infile.num_spokes
            rho = infile.spokes[stack][spoke]
            outfile.write('v {x:f} {y:f} {z:f}\n'.format(
                z = infile.stacks[stack],
                x = rho * math.cos(phi),
                y = rho * math.sin(phi)))

    # Write all faces. Make a rectangle for each stack that links the
    # vertex in it to the next
    for stack in range(infile.num_stacks - 1):
        outfile.write('# Stack {stack:d}\n'.format(stack=stack))
        # OBJ vertex indices are 1-based, hence the "+ 1".
        cur_stack_idx = stack * infile.num_spokes + 1
        next_stack_idx = (stack + 1) * infile.num_spokes + 1
        for spoke in range(infile.num_spokes):
            next_spoke = (spoke + 1) % infile.num_spokes
            outfile.write('f {v1:d} {v2:d} {v3:d} {v4:d}\n'.format(
                v1=cur_stack_idx + spoke,
                v2=cur_stack_idx + next_spoke,
                v3=next_stack_idx + next_spoke,
                v4=next_stack_idx + spoke
            ))

    if args.enclose:
        outfile.write('# top cap\n')
        outfile.write('f {v}\n'.format(
            v=' '.join('%d' % (1 + d) for d in range(infile.num_spokes))))
        outfile.write('# bottom cap\n')
        outfile.write('f {v}\n'.format(
            v=' '.join('%d' % (1 + d + infile.num_spokes * (infile.num_stacks - 1)) for d in range(infile.num_spokes))))
def main():
    def handle_output(args, infile, outfile):
        # Dispatch on the requested output format ("obj" is the only one).
        if args.output == 'obj':
            to_obj(args, infile, outfile)

    parser = argparse.ArgumentParser(description="Converts an AAOP file to a OBJ model")
    parser.add_argument('--enclose', '-e', help="Enclose ends of model", action='store_true')
    parser.add_argument('infile', help='AAOP input file (.aop). "-" for stdin')
    parser.add_argument('outfile', help='Output file. "-" for stdout')
    parser.add_argument('-o', '--output', help='Output format', choices=['obj'], default='obj')
    args = parser.parse_args()

    # "-" selects stdin/stdout instead of a named file on either side.
    if args.infile == '-':
        aaop = AAOPFile(sys.stdin)
    else:
        with open(args.infile, 'r') as infile:
            aaop = AAOPFile(infile)
    if args.outfile == '-':
        handle_output(args, aaop, sys.stdout)
    else:
        with open(args.outfile, 'w') as outfile:
            handle_output(args, aaop, outfile)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
149678 | # Code generated by moonworm : https://github.com/bugout-dev/moonworm
# Moonworm version : 0.1.15
import argparse
import json
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from brownie import Contract, network, project
from brownie.network.contract import ContractContainer
from eth_typing.evm import ChecksumAddress
PROJECT_DIRECTORY = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
BUILD_DIRECTORY = os.path.join(PROJECT_DIRECTORY, "build", "contracts")
def boolean_argument_type(raw_value: str) -> bool:
    """argparse type converter: parse a yes/no style string into a bool.

    Raises:
        ValueError: if the string matches neither a true nor a false value.
    """
    TRUE_VALUES = ["1", "t", "y", "true", "yes"]
    FALSE_VALUES = ["0", "f", "n", "false", "no"]
    normalized = raw_value.lower()
    if normalized in TRUE_VALUES:
        return True
    if normalized in FALSE_VALUES:
        return False
    raise ValueError(
        f"Invalid boolean argument: {raw_value}. Value must be one of: {','.join(TRUE_VALUES + FALSE_VALUES)}"
    )
def bytes_argument_type(raw_value: str) -> bytes:
    # argparse type converter for bytes-typed contract arguments.
    # NOTE(review): only the empty string is actually encoded; every other
    # value is returned unchanged as `str`, despite the `-> bytes` annotation.
    # Presumably so hex strings like "0x..." pass straight through to brownie
    # — confirm before tightening the return type.
    if raw_value == "":
        return raw_value.encode()
    return raw_value
def get_abi_json(abi_name: str) -> List[Dict[str, Any]]:
    """Load the ABI list from the compiled build artifact for *abi_name*.

    Raises:
        IOError: if the build artifact has not been compiled yet.
        ValueError: if the artifact contains no "abi" entry.
    """
    abi_full_path = os.path.join(BUILD_DIRECTORY, f"{abi_name}.json")
    if not os.path.isfile(abi_full_path):
        raise IOError(
            f"File does not exist: {abi_full_path}. Maybe you have to compile the smart contracts?"
        )
    with open(abi_full_path, "r") as build_file:
        build_data = json.load(build_file)
    abi = build_data.get("abi")
    if abi is None:
        raise ValueError(f"Could not find ABI definition in: {abi_full_path}")
    return abi
def contract_from_build(abi_name: str) -> ContractContainer:
    """Build a brownie ContractContainer from the compiled artifact *abi_name*.

    Raises:
        IOError: if the build artifact is missing.
    """
    # This is workaround because brownie currently doesn't support loading the same project multiple
    # times. This causes problems when using multiple contracts from the same project in the same
    # python project.
    PROJECT = project.main.Project("moonworm", Path(PROJECT_DIRECTORY))

    abi_full_path = os.path.join(BUILD_DIRECTORY, f"{abi_name}.json")
    if not os.path.isfile(abi_full_path):
        raise IOError(
            f"File does not exist: {abi_full_path}. Maybe you have to compile the smart contracts?"
        )
    with open(abi_full_path, "r") as ifp:
        build = json.load(ifp)
    return ContractContainer(PROJECT, build)
class SignatureVerifier:
    """Thin brownie wrapper around the SignatureVerifier smart contract."""

    def __init__(self, contract_address: Optional[ChecksumAddress]):
        # With no address the instance can only deploy(); with an address it
        # binds to the already-deployed contract via its ABI.
        self.contract_name = "SignatureVerifier"
        self.address = contract_address
        self.contract = None
        self.abi = get_abi_json("SignatureVerifier")
        if self.address is not None:
            self.contract: Optional[Contract] = Contract.from_abi(
                self.contract_name, self.address, self.abi
            )

    def deploy(self, name: str, transaction_config):
        """Deploy a new contract instance and bind this wrapper to it."""
        contract_class = contract_from_build(self.contract_name)
        deployed_contract = contract_class.deploy(name, transaction_config)
        self.address = deployed_contract.address
        self.contract = deployed_contract

    def assert_contract_is_instantiated(self) -> None:
        # Guard used by every method that needs a bound contract.
        if self.contract is None:
            raise Exception("contract has not been instantiated")

    def verify_contract(self):
        """Publish the contract source for block-explorer verification."""
        self.assert_contract_is_instantiated()
        contract_class = contract_from_build(self.contract_name)
        contract_class.publish_source(self.contract)

    def get_eip712_hash(self, new_value: int, block_deadline: int) -> Any:
        # Read-only call; submits no transaction.
        self.assert_contract_is_instantiated()
        return self.contract.getEIP712Hash.call(new_value, block_deadline)

    def get_value(self) -> Any:
        # Read-only call; submits no transaction.
        self.assert_contract_is_instantiated()
        return self.contract.getValue.call()

    def owner(self) -> Any:
        # Read-only call; submits no transaction.
        self.assert_contract_is_instantiated()
        return self.contract.owner.call()

    def renounce_ownership(self, transaction_config) -> Any:
        self.assert_contract_is_instantiated()
        return self.contract.renounceOwnership(transaction_config)

    def set_value(self, new_value: int, transaction_config) -> Any:
        self.assert_contract_is_instantiated()
        return self.contract.setValue(new_value, transaction_config)

    def set_value_with_signature(
        self, new_value: int, block_deadline: int, signature: bytes, transaction_config
    ) -> Any:
        self.assert_contract_is_instantiated()
        return self.contract.setValueWithSignature(
            new_value, block_deadline, signature, transaction_config
        )

    def transfer_ownership(self, new_owner: ChecksumAddress, transaction_config) -> Any:
        self.assert_contract_is_instantiated()
        return self.contract.transferOwnership(new_owner, transaction_config)
def get_transaction_config(args: argparse.Namespace) -> Dict[str, Any]:
    """Translate parsed CLI arguments into a brownie transaction-config dict."""
    signer = network.accounts.load(args.sender, args.password)
    transaction_config: Dict[str, Any] = {"from": signer}
    # Optional gas / confirmation settings are forwarded only when provided.
    optional_fields = [
        ("gas_price", "gas_price"),
        ("max_fee_per_gas", "max_fee"),
        ("max_priority_fee_per_gas", "priority_fee"),
        ("confirmations", "required_confs"),
        ("nonce", "nonce"),
    ]
    for arg_name, config_key in optional_fields:
        value = getattr(args, arg_name)
        if value is not None:
            transaction_config[config_key] = value
    return transaction_config
def add_default_arguments(parser: argparse.ArgumentParser, transact: bool) -> None:
    """Attach the CLI options shared by every subcommand to *parser*.

    --network/--address are always added; the signer, gas, and transaction
    options are only added when *transact* is True.
    """
    parser.add_argument(
        "--network", required=True, help="Name of brownie network to connect to"
    )
    parser.add_argument(
        "--address", required=False, help="Address of deployed contract to connect to"
    )
    if not transact:
        return
    # (flags, keyword arguments) for each transaction-related option.
    transact_options = [
        (
            ["--sender"],
            {"required": True, "help": "Path to keystore file for transaction sender"},
        ),
        (
            ["--password"],
            {
                "required": False,
                "help": "Password to keystore file (if you do not provide it, you will be prompted for it)",
            },
        ),
        (
            ["--gas-price"],
            {"default": None, "help": "Gas price at which to submit transaction"},
        ),
        (
            ["--max-fee-per-gas"],
            {"default": None, "help": "Max fee per gas for EIP1559 transactions"},
        ),
        (
            ["--max-priority-fee-per-gas"],
            {"default": None, "help": "Max priority fee per gas for EIP1559 transactions"},
        ),
        (
            ["--confirmations"],
            {
                "type": int,
                "default": None,
                "help": "Number of confirmations to await before considering a transaction completed",
            },
        ),
        (
            ["--nonce"],
            {"type": int, "default": None, "help": "Nonce for the transaction (optional)"},
        ),
        (
            ["--value"],
            {"default": None, "help": "Value of the transaction in wei(optional)"},
        ),
    ]
    for flags, options in transact_options:
        parser.add_argument(*flags, **options)
def handle_deploy(args: argparse.Namespace) -> None:
    """CLI handler: deploy a new SignatureVerifier contract and print the receipt."""
    network.connect(args.network)
    tx_config = get_transaction_config(args)
    # No address yet: a fresh, unattached contract object performs the deploy.
    print(SignatureVerifier(None).deploy(name=args.name, transaction_config=tx_config))
def handle_verify_contract(args: argparse.Namespace) -> None:
    """CLI handler: run source verification for the deployed contract."""
    network.connect(args.network)
    print(SignatureVerifier(args.address).verify_contract())
def handle_get_eip712_hash(args: argparse.Namespace) -> None:
    """CLI handler: print the EIP-712 hash for (new_value, block_deadline)."""
    network.connect(args.network)
    verifier = SignatureVerifier(args.address)
    print(
        verifier.get_eip712_hash(
            new_value=args.new_value, block_deadline=args.block_deadline
        )
    )
def handle_get_value(args: argparse.Namespace) -> None:
    """CLI handler: print the contract's stored value."""
    network.connect(args.network)
    print(SignatureVerifier(args.address).get_value())
def handle_owner(args: argparse.Namespace) -> None:
    """CLI handler: print the contract's current owner."""
    network.connect(args.network)
    print(SignatureVerifier(args.address).owner())
def handle_renounce_ownership(args: argparse.Namespace) -> None:
    """CLI handler: submit a renounceOwnership transaction and print the receipt."""
    network.connect(args.network)
    verifier = SignatureVerifier(args.address)
    print(verifier.renounce_ownership(transaction_config=get_transaction_config(args)))
def handle_set_value(args: argparse.Namespace) -> None:
    """CLI handler: submit a set_value transaction and print the receipt."""
    network.connect(args.network)
    verifier = SignatureVerifier(args.address)
    tx_config = get_transaction_config(args)
    print(verifier.set_value(new_value=args.new_value, transaction_config=tx_config))
def handle_set_value_with_signature(args: argparse.Namespace) -> None:
    """CLI handler: submit a signed set_value_with_signature transaction."""
    network.connect(args.network)
    verifier = SignatureVerifier(args.address)
    tx_config = get_transaction_config(args)
    print(
        verifier.set_value_with_signature(
            new_value=args.new_value,
            block_deadline=args.block_deadline,
            signature=args.signature,
            transaction_config=tx_config,
        )
    )
def handle_transfer_ownership(args: argparse.Namespace) -> None:
    """CLI handler: submit a transferOwnership transaction and print the receipt."""
    network.connect(args.network)
    verifier = SignatureVerifier(args.address)
    tx_config = get_transaction_config(args)
    print(
        verifier.transfer_ownership(
            new_owner=args.new_owner, transaction_config=tx_config
        )
    )
def generate_cli() -> argparse.ArgumentParser:
    """Build the argument parser for the SignatureVerifier CLI.

    Each subcommand gets the shared default arguments plus its own options
    and dispatches to the matching handle_* function via ``func``.
    """
    parser = argparse.ArgumentParser(description="CLI for SignatureVerifier")
    parser.set_defaults(func=lambda _: parser.print_help())
    subcommands = parser.add_subparsers()

    sub = subcommands.add_parser("deploy")
    add_default_arguments(sub, True)
    sub.add_argument("--name", required=True, help="Type: string", type=str)
    sub.set_defaults(func=handle_deploy)

    sub = subcommands.add_parser("verify-contract")
    add_default_arguments(sub, False)
    sub.set_defaults(func=handle_verify_contract)

    sub = subcommands.add_parser("get-eip712-hash")
    add_default_arguments(sub, False)
    sub.add_argument("--new-value", required=True, help="Type: uint256", type=int)
    sub.add_argument("--block-deadline", required=True, help="Type: uint256", type=int)
    sub.set_defaults(func=handle_get_eip712_hash)

    sub = subcommands.add_parser("get-value")
    add_default_arguments(sub, False)
    sub.set_defaults(func=handle_get_value)

    sub = subcommands.add_parser("owner")
    add_default_arguments(sub, False)
    sub.set_defaults(func=handle_owner)

    sub = subcommands.add_parser("renounce-ownership")
    add_default_arguments(sub, True)
    sub.set_defaults(func=handle_renounce_ownership)

    sub = subcommands.add_parser("set-value")
    add_default_arguments(sub, True)
    sub.add_argument("--new-value", required=True, help="Type: uint256", type=int)
    sub.set_defaults(func=handle_set_value)

    sub = subcommands.add_parser("set-value-with-signature")
    add_default_arguments(sub, True)
    sub.add_argument("--new-value", required=True, help="Type: uint256", type=int)
    sub.add_argument("--block-deadline", required=True, help="Type: uint256", type=int)
    sub.add_argument("--signature", required=True, help="Type: bytes", type=bytes_argument_type)
    sub.set_defaults(func=handle_set_value_with_signature)

    sub = subcommands.add_parser("transfer-ownership")
    add_default_arguments(sub, True)
    sub.add_argument("--new-owner", required=True, help="Type: address")
    sub.set_defaults(func=handle_transfer_ownership)

    return parser
def main() -> None:
    """Entry point: parse CLI arguments and dispatch to the selected handler."""
    cli = generate_cli()
    parsed = cli.parse_args()
    parsed.func(parsed)
# Allow the module to be executed directly as a command-line script.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1679371 | <gh_stars>1-10
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
from collections import defaultdict
from paddle.io import IterableDataset
class RecDataset(IterableDataset):
    """Streaming dataset over comma-separated text files.

    Each input line yields one sample: a fixed-order list of 23 per-field
    feature-id arrays (each truncated/padded to ``max_len``), followed by
    ``np.array([ctr])`` and ``np.array([ctcvr])`` labels.
    """

    def __init__(self, file_list, config):
        # file_list: paths of raw text files to stream.
        # config: dict-like hyperparameter source (must support .get()).
        super(RecDataset, self).__init__()
        self.config = config
        self.file_list = file_list
        self.init()

    def init(self):
        """Build the field-id -> [visited flag, slot index] lookup table."""
        all_field_id = [
            '101', '109_14', '110_14', '127_14', '150_14', '121', '122', '124',
            '125', '126', '127', '128', '129', '205', '206', '207', '210',
            '216', '508', '509', '702', '853', '301'
        ]
        self.all_field_id_dict = defaultdict(int)
        # Maximum number of feature ids kept per field; shorter lists are padded.
        self.max_len = self.config.get("hyper_parameters.max_len", 3)
        for i, field_id in enumerate(all_field_id):
            # [visited flag (toggled per line), fixed output slot index]
            self.all_field_id_dict[field_id] = [False, i]
        # Value appended to fields with fewer than max_len feature ids.
        self.padding = 0

    def __iter__(self):
        """Parse every file line by line and yield one sample per line.

        Line layout (comma-separated): columns 1 and 2 are read as the integer
        ctr/ctcvr labels; columns 4+ are ``field_id:feat_id`` pairs.
        NOTE(review): assumes this CSV layout — confirm against the raw data.
        """
        full_lines = []  # NOTE(review): unused leftover, kept unchanged
        self.data = []
        for file in self.file_list:
            with open(file, "r") as rf:
                for l in rf:
                    features = l.strip().split(',')
                    ctr = int(features[1])
                    ctcvr = int(features[2])
                    # One (field_id, id-list) accumulator per known field.
                    output = [(field_id, [])
                              for field_id in self.all_field_id_dict]
                    output_list = []
                    for elem in features[4:]:
                        field_id, feat_id = elem.strip().split(':')
                        # Skip feature ids for fields we do not track.
                        if field_id not in self.all_field_id_dict:
                            continue
                        self.all_field_id_dict[field_id][0] = True
                        index = self.all_field_id_dict[field_id][1]
                        output[index][1].append(int(feat_id))
                    for field_id in self.all_field_id_dict:
                        visited, index = self.all_field_id_dict[field_id]
                        # Reset the visited flag for the next line.
                        self.all_field_id_dict[field_id][0] = False
                        if len(output[index][1]) > self.max_len:
                            # Truncate over-long fields to max_len ids.
                            output_list.append(
                                np.array(output[index][1][:self.max_len]))
                        else:
                            # Pad short fields up to max_len with self.padding.
                            for ii in range(self.max_len - len(output[index][
                                    1])):
                                output[index][1].append(self.padding)
                            output_list.append(np.array(output[index][1]))
                    output_list.append(np.array([ctr]))
                    output_list.append(np.array([ctcvr]))
                    yield output_list
| StarcoderdataPython |
3358257 | import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
import os
import pickle
# Load the HELOC dataset; -9 is read as NaN (missing).
raw = pd.read_csv('heloc_dataset_v1.csv', na_values = [-9])
# raw = pd.read_csv('heloc_dataset_v1.csv')
#### transform target
# Encode the label column: Good -> 1, Bad -> 0.
raw.loc[raw['RiskPerformance'] == 'Good', 'RiskPerformance'] = 1
raw.loc[raw['RiskPerformance'] == 'Bad', 'RiskPerformance'] = 0
#### transforming data
# see = raw[raw.isnull().any(axis=1)]
data = raw.copy(deep=True)
# Drop rows that contained -9 (now NaN) and reindex.
data.dropna(axis=0, inplace=True)
data.reset_index(drop=True, inplace=True)
# Treat -8 as a second missingness code and impute it with the column mode.
imputer_mode = SimpleImputer(missing_values=-8, strategy='most_frequent')
data_y = data.pop('RiskPerformance')
data_X_impute = pd.DataFrame(imputer_mode.fit_transform(data), columns=data.columns, index=data.index)
data_X_impute.insert(0, 'RiskPerformance', data_y)
data = data_X_impute.copy(deep=True)
# data.loc[data['MSinceMostRecentDelq'] == -7, 'MSinceMostRecentDelq'] = max(data['MSinceMostRecentDelq'])
# data.loc[data['MSinceMostRecentInqexcl7days'] == -7, 'MSinceMostRecentInqexcl7days'] = max(data['MSinceMostRecentInqexcl7days'])
# Map the remaining -7 codes to 0 (presumably "condition not met" -- verify).
data.replace(-7, 0, inplace=True)
#### build dummies for cat
cat_var = data.loc[:, ['MaxDelq2PublicRecLast12M', 'MaxDelqEver']]
data.drop(['MaxDelq2PublicRecLast12M', 'MaxDelqEver'], axis=1, inplace=True)
# One-hot encode MaxDelq2PublicRecLast12M (columns named for levels 0-8),
# then drop level 0 as the reference category.
encoder_MaxDelq2 = OneHotEncoder()
MaxDelq2 = encoder_MaxDelq2.fit_transform(cat_var.iloc[:, 0].values.reshape(-1, 1)).toarray()
MaxDelq2_Cols = []
for i in range(0, 9):
    col = 'MaxDelq2PublicRecLast12M_' + str(i)
    MaxDelq2_Cols.append(col)
MaxDelq2_df = pd.DataFrame(MaxDelq2, columns=MaxDelq2_Cols)
MaxDelq2_df.drop('MaxDelq2PublicRecLast12M_0', axis=1, inplace=True)
# One-hot encode MaxDelqEver (columns named for levels 2-8), dropping level 2.
encoder_MaxDelqEver = OneHotEncoder()
MaxDelqEver = encoder_MaxDelqEver.fit_transform(cat_var.iloc[:, 1].values.reshape(-1, 1)).toarray()
MaxDelqEver_Cols = []
for i in range(2, 9):
    col = 'MaxDelqEver_' + str(i)
    MaxDelqEver_Cols.append(col)
MaxDelqEver_df = pd.DataFrame(MaxDelqEver, columns=MaxDelqEver_Cols)
MaxDelqEver_df.drop('MaxDelqEver_2', axis=1, inplace=True)
# NOTE(review): dummy_var is computed but never used afterwards.
dummy_var = pd.get_dummies(cat_var, columns=['MaxDelq2PublicRecLast12M', 'MaxDelqEver'])
# dummy_var.insert(5,'MaxDelq2PublicRecLast12M_5_6', dummy_var.iloc[:, 6] + dummy_var.iloc[:, 7])
# dummy_var.drop(['MaxDelq2PublicRecLast12M_5.0', 'MaxDelq2PublicRecLast12M_6.0'], axis=1, inplace=True)
# Merge indicator columns for levels 5 and 6 into a single combined column.
MaxDelq2_df.insert(5,'MaxDelq2PublicRecLast12M_5_6', MaxDelq2_df.iloc[:, 6] + MaxDelq2_df.iloc[:, 7])
MaxDelq2_df.drop(['MaxDelq2PublicRecLast12M_5', 'MaxDelq2PublicRecLast12M_6'], axis=1, inplace=True)
data_final = pd.concat([data, MaxDelq2_df, MaxDelqEver_df], axis=1)
data_final.to_csv('data_set_cleaned_v4.csv', index=False)
# Persist the fitted transformers so the identical encoding can be reapplied later.
pickle.dump(imputer_mode, open('imputer_mode.sav', 'wb'))
pickle.dump(encoder_MaxDelq2, open('OneHot_MaxDelq2.sav', 'wb'))
pickle.dump(encoder_MaxDelqEver, open('OneHot_MaxDelqEver.sav', 'wb')) | StarcoderdataPython |
96134 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 22:55:34 2021
@author: logan
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys; sys.path.append("../../meshcnn")
from meshcnn.ops import MeshConv, DownSamp, ResBlock
import os
class DownSamp(nn.Module):
    """Minimal downsampling placeholder that only records the vertex count of
    the previous (finer) icosphere level.

    NOTE(review): this shadows the DownSamp imported from meshcnn.ops above.
    """

    def __init__(self, nv_prev):
        super().__init__()
        self.nv_prev = nv_prev
class ResBlock(nn.Module):
    """Residual block on an icosphere mesh: two MeshConv layers (the second
    strided for downsampling) with BatchNorm/ReLU, plus a strided shortcut.

    NOTE(review): this shadows the ResBlock imported from meshcnn.ops above.
    """

    def __init__(self, in_chan, out_chan, level, mesh_folder):
        """
        Args:
            in_chan: number of input channels.
            out_chan: number of output channels.
            level: icosphere subdivision level whose mesh file is used.
            mesh_folder: directory containing icosphere_{level}.pkl files.
        """
        super().__init__()
        l = level
        mesh_file = os.path.join(mesh_folder, "icosphere_{}.pkl".format(l))
        self.conv1 = MeshConv(in_chan, out_chan, mesh_file=mesh_file, stride=1)
        self.conv2 = MeshConv(out_chan, out_chan, mesh_file=mesh_file, stride=2)
        self.relu = nn.ReLU(inplace=True)
        self.bn1 = nn.BatchNorm1d(out_chan)
        self.bn2 = nn.BatchNorm1d(out_chan)
        # Vertex count after the strided conv, exposed for downstream layers.
        self.nv_prev = self.conv2.nv_prev
        if in_chan != out_chan:
            # Project + downsample the input so shapes match for the residual add.
            self.shortcut = nn.Sequential(
                MeshConv(in_chan, out_chan, mesh_file=mesh_file, stride=2),
                nn.BatchNorm1d(out_chan)
            )
        else:
            # BUG FIX: the original read `self.shortcut.nn.Sequential()`, which
            # raised AttributeError whenever in_chan == out_chan (self.shortcut
            # did not exist yet). An identity Sequential is the intended shortcut.
            # NOTE(review): with this identity shortcut, `x` is not downsampled
            # while the main path is strided -- confirm shapes match for the add.
            self.shortcut = nn.Sequential()

    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out += self.shortcut(x)
        out = self.relu(out)
        return out
class Model(nn.Module):
    """Spherical network: input MeshConv block, four mesh ResBlocks walking
    icosphere levels 6 -> 3, max-pool over the remaining vertices, dropout,
    and a single linear output unit."""

    def __init__(self, mesh_folder, feat=32):
        # feat: base channel width; each ResBlock doubles it (feat .. 16*feat).
        super().__init__()
        mf = os.path.join(mesh_folder, "icosphere_6.pkl")
        # Input has 4 channels per vertex.
        self.in_conv = MeshConv(4, feat, mesh_file=mf, stride=1)
        self.in_bn = nn.BatchNorm1d(feat)
        self.relu = nn.ReLU(inplace=True)
        self.in_block = nn.Sequential(self.in_conv, self.in_bn, self.relu)
        self.block1 = ResBlock(in_chan=feat, out_chan=2*feat, level=6, mesh_folder=mesh_folder)
        self.block2 = ResBlock(in_chan=2*feat, out_chan=4*feat, level=5, mesh_folder=mesh_folder)
        self.block3 = ResBlock(in_chan=4*feat, out_chan=8*feat, level=4, mesh_folder=mesh_folder)
        self.block4 = ResBlock(in_chan=8*feat, out_chan=16*feat, level=3, mesh_folder=mesh_folder)
        self.avg = nn.MaxPool1d(kernel_size=self.block4.nv_prev) # output shape batch x channels x 1
        self.out_layer = nn.Linear(16*feat, 1)

    def forward(self, x):
        x = self.in_block(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        # Squeeze drops the pooled vertex dim. NOTE(review): it also drops the
        # batch dim when batch size is 1 -- confirm callers handle that.
        x = torch.squeeze(self.avg(x))
        x = F.dropout(x, training=self.training)
        x = self.out_layer(x)
        return x
class Model_confound(nn.Module):
    """Variant of Model that concatenates a confound/metadata vector ``m``
    (passed through a 1x1 Conv1d to 4 channels) with the pooled mesh features
    before the final linear layer."""

    def __init__(self, mesh_folder, feat=32):
        # feat: base channel width; each ResBlock doubles it (feat .. 16*feat).
        super().__init__()
        mf = os.path.join(mesh_folder, "icosphere_6.pkl")
        self.in_conv = MeshConv(4, feat, mesh_file=mf, stride=1)
        self.in_bn = nn.BatchNorm1d(feat)
        self.relu = nn.ReLU(inplace=True)
        self.in_block = nn.Sequential(self.in_conv, self.in_bn, self.relu)
        self.block1 = ResBlock(in_chan=feat, out_chan=2*feat, level=6, mesh_folder=mesh_folder)
        self.block2 = ResBlock(in_chan=2*feat, out_chan=4*feat, level=5, mesh_folder=mesh_folder)
        self.block3 = ResBlock(in_chan=4*feat, out_chan=8*feat, level=4, mesh_folder=mesh_folder)
        self.block4 = ResBlock(in_chan=8*feat, out_chan=16*feat, level=3, mesh_folder=mesh_folder)
        self.avg = nn.MaxPool1d(kernel_size=self.block4.nv_prev) # output shape batch x channels x 1
        # +4 input features for the confound channels produced by conv11.
        self.out_layer = nn.Linear(16*feat+4, 1)
        self.conv11 = nn.Conv1d(1,4, kernel_size = 1)

    def forward(self, x,m):
        x = self.in_block(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = torch.squeeze(self.avg(x))
        x = F.dropout(x, training=self.training)
        m = self.conv11(m)
        m = nn.ReLU()(m)
        m = m.reshape(m.shape[0],-1)
        m = m.squeeze()
        # NOTE(review): cat along dim=0 only works when the squeezes above have
        # produced 1-D tensors (batch size 1); for batched input this looks
        # like it should be dim=1 -- confirm intended batch size.
        out = torch.cat([x,m], dim=0)
        out = out.squeeze()
        x = self.out_layer(out)
return x | StarcoderdataPython |
4806101 | <filename>molsysmt/element/molecule/__init__.py
from . import water
from . import ion
from . import cosolute
from . import small_molecule
from . import peptide
from . import protein
from . import dna
from . import rna
from . import lipid
from .get_molecule_index_from_atom import get_molecule_index_from_atom
from .get_molecule_id_from_molecule import get_molecule_id_from_molecule
from .get_molecule_type_from_molecule import get_molecule_type_from_molecule
from .get_molecule_name_from_molecule import get_molecule_name_from_molecule
from .get_molecule_type_from_group_names import get_molecule_type_from_group_names
from .get_molecule_type_from_sequence import get_molecule_type_from_sequence
from .get_n_molecules_from_system import get_n_molecules_from_system
| StarcoderdataPython |
3380226 | <reponame>wakaflorien/cst-research-api<gh_stars>1-10
# Generated by Django 3.2.2 on 2021-09-18 09:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: constrain profile.gender to the
    FEMALE/MALE choice set."""

    dependencies = [
        ('staff', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='gender',
            field=models.CharField(choices=[('FEMALE', 'Female'), ('MALE', 'Male')], max_length=100),
        ),
    ]
| StarcoderdataPython |
162277 | from fastapi import Depends
from server.dependencies import get_db
from sqlalchemy.orm import Session
from users.schemas import Token_Schema
from .models import User, Blacklisted_Token
def register_or_login(user_data: dict, user_type: str, db: Session = Depends(get_db)):
    """Return the account type for the Google profile in ``user_data``.

    When no User row with the profile's email exists yet, a new one is
    created with the given ``user_type``; otherwise the stored type of the
    existing user is returned.
    """
    # Extract profile fields up front (raises KeyError on malformed payloads,
    # matching the previous behavior even for existing users).
    email = user_data["email"]
    name = user_data["name"]
    avatar = user_data["picture"]
    is_verified = user_data["hd"] == "iiti.ac.in"

    existing = db.query(User).filter(User.email == user_data["email"]).first()
    if existing is not None:
        return existing.user_type

    db.add(
        User(
            email=email,
            name=name,
            avatar=avatar,
            user_type=user_type,
            is_verified=is_verified,
        )
    )
    db.commit()
    return user_type
def add_blacklist_token(token: Token_Schema, db: Session = Depends(get_db)) -> bool:
    """Persist a token to the blacklist table.

    Returns:
        True when the row was inserted and committed, False on failure.
    """
    try:
        blacklist_token = Blacklisted_Token(**token.dict())
        db.add(blacklist_token)
        db.commit()
        return True
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; database errors still map to a False return.
        return False
def is_token_blacklisted(token: Token_Schema, db: Session = Depends(get_db)) -> bool:
    """Return True when the given token exists in the blacklist table."""
    match = (
        db.query(Blacklisted_Token).filter(Blacklisted_Token.token == token).first()
    )
    return match is not None
| StarcoderdataPython |
3380114 | <filename>handler.py
import json
import os
import sys
here = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(here, "./vendored"))
import requests
TOKEN = os.environ['TELEGRAM_TOKEN']
BASE_URL = "https://api.telegram.org/bot{}".format(TOKEN)
def hello(event, context):
    """AWS Lambda handler for a Telegram webhook.

    Parses the incoming update, replies with a greeting (or a /start prompt)
    via the Bot API, and always returns HTTP 200 so Telegram stops retrying.
    Any parsing/network error is printed and otherwise ignored.
    """
    try:
        update = json.loads(event["body"])
        text = str(update["message"]["text"])
        chat = update["message"]["chat"]
        first_name = chat["first_name"]
        if "start" in text:
            reply = "Hello {}".format(first_name)
        else:
            reply = "Please /start, {}".format(first_name)
        requests.post(
            BASE_URL + "/sendMessage",
            {"text": reply.encode("utf8"), "chat_id": chat["id"]},
        )
    except Exception as e:
        print(e)
    return {"statusCode": 200}
| StarcoderdataPython |
3391530 |
from minerl.herobraine.hero.handlers.actionable import *
from minerl.herobraine.hero.handlers.mission import *
from minerl.herobraine.hero.handlers.observables import *
from minerl.herobraine.hero.handlers.rewardables import *
| StarcoderdataPython |
# Whether OCR is triggered automatically (as opposed to on demand).
DEFAULT_OCR_AUTO_OCR = True
# Dotted import path of the OCR backend implementation to use.
DEFAULT_OCR_BACKEND = 'mayan.apps.ocr.backends.tesseract.Tesseract'
# Extra backend arguments; OMP_THREAD_LIMIT=1 limits OpenMP threads per process.
DEFAULT_OCR_BACKEND_ARGUMENTS = {'environment': {'OMP_THREAD_LIMIT': '1'}}
# Seconds to wait before retrying a failed per-page OCR task.
TASK_DOCUMENT_VERSION_PAGE_OCR_RETRY_DELAY = 10
TASK_DOCUMENT_VERSION_PAGE_OCR_TIMEOUT = 10 * 60  # 10 Minutes per page
| StarcoderdataPython |
3230747 | # Solution 1
# O(bns) time / O(n) space
# b length of the bigString
# n length of the smallStrings array
# s length of the largest small string
def multiStringSearch(bigString, smallStrings):
    """Return one boolean per small string: is it a substring of bigString?"""
    results = []
    for candidate in smallStrings:
        results.append(isInBigString(bigString, candidate))
    return results
def isInBigString(bigString, smallString):
    """Return True when smallString occurs anywhere in bigString."""
    # Last start index at which smallString could still fit.
    limit = len(bigString) - len(smallString)
    for start in range(len(bigString)):
        if start > limit:
            break
        if isInBigStringHelper(bigString, smallString, start):
            return True
    return False
def isInBigStringHelper(bigString, smallString, startIdx):
    """Return True when smallString matches bigString starting at startIdx.

    Assumes the caller guarantees startIdx + len(smallString) <= len(bigString).
    """
    for offset in range(len(smallString)):
        if bigString[startIdx + offset] != smallString[offset]:
            return False
    return True
# Solution 2
# O(b^2 + ns) time / O(b^2 + n) space
# b length of the bigString
# n length of the smallStrings array
# s length of the largest small string
def multiStringSearch(bigString, smallStrings):
    """Answer substring queries by building a suffix trie over bigString."""
    suffix_trie = ModifiedSuffixTrie(bigString)
    results = []
    for candidate in smallStrings:
        results.append(suffix_trie.contains(candidate))
    return results
class ModifiedSuffixTrie:
    """Suffix trie over one string; contains() answers substring queries.

    Every suffix of the input is inserted, so any substring corresponds to a
    path starting at the root.
    """

    def __init__(self, string):
        self.root = {}
        self.populateModifiedSufixTrieFrom(string)

    def populateModifiedSufixTrieFrom(self, string):
        # Insert every suffix of the string into the trie.
        for start in range(len(string)):
            self.insertSubstringAt(start, string)

    def insertSubstringAt(self, i, string):
        current = self.root
        for ch in string[i:]:
            current = current.setdefault(ch, {})

    def contains(self, string):
        """Return True when `string` is a substring of the indexed text."""
        current = self.root
        for ch in string:
            if ch not in current:
                return False
            current = current[ch]
        return True
# Solution 3
# O(ns + bs) time / O(ns) space
# b length of the bigString
# n length of the smallStrings array
# s length of the largest small string
def multiStringSearch(bigString, smallStrings):
    """Insert all small strings into a trie, then scan bigString once per
    start index to mark every small string that occurs."""
    trie = Trie()
    for small in smallStrings:
        trie.insert(small)
    found = {}
    for start in range(len(bigString)):
        findSmallStringsIn(bigString, start, trie, found)
    return [small in found for small in smallStrings]
def findSmallStringsIn(string, startIdx, trie, containedStrings):
    """Walk the trie along string[startIdx:], recording every complete small
    string encountered into containedStrings (word -> True)."""
    node = trie.root
    for ch in string[startIdx:]:
        if ch not in node:
            return
        node = node[ch]
        if trie.endSymbol in node:
            containedStrings[node[trie.endSymbol]] = True
class Trie:
    """Prefix trie of nested dicts; a word's final node maps endSymbol to the
    complete word so matches can be reported directly."""

    def __init__(self):
        self.root = {}
        self.endSymbol = '*'

    def insert(self, string):
        node = self.root
        for ch in string:
            node = node.setdefault(ch, {})
        node[self.endSymbol] = string
| StarcoderdataPython |
3273186 | import re
import ipaddress
import argparse
class nmapError(Exception):
    """Raised for invalid scan commands or malformed host specifications."""
    pass
class nmap(object):
    """Splits a single nmap-style scan command into per-agent commands so the
    work is shared across a pool of agents (by host range for ARP scans,
    by port range otherwise)."""

    # Regex fragments for IPv4 and CIDR validation.
    IPV4_RE = r'(\d{1,3}\.){3}\d{1,3}'
    CIDR_RE = r'^{}/[1-3]?[0-9]$'.format(IPV4_RE)
    # Token indices within a split command string: nmap <option> <host> <ports>.
    OPTION = 1
    HOST = 2
    PORT = 3
    SCRIPT = 4
    # Recognised scan options; -PR (ARP) triggers splitting by host instead
    # of by port (see divide_workload).
    OPTION_LIST = ['-PR', '-PS', '-sS', '-sT', '-sU', '-sA', '-OT']

    def __init__(self, agent_id_list):
        """
        Initialise the module with the agents that will share the scan.

        :param agent_id_list: a list of agent ids
        :return: None
        """
        self.agent_list = agent_id_list

    def validate_command(self, command_list):
        """Return True when command_list is a well-formed 4-token command."""
        # checks if the length of the command list is valid
        # NOTE(review): exactly 4 tokens are required, so the --scripts=... form
        # shown in parse_command's docstring would be rejected here -- confirm.
        if len(command_list) != 4:
            return False
        # check if the first option is a valid one
        option = command_list[self.OPTION]
        if option not in self.OPTION_LIST:
            return False
        # check if the ports is in the right format
        # NOTE(review): the regex accepts out-of-range values such as 99999.
        port = command_list[self.PORT]
        if not re.match(r'^\d{1,5}(-\d{1,5})?$', port):
            return False
        return True

    def parse_command(self, command):
        """
        Interpret the command in the string form and calls the respective functions
        Command format:
            nmap -sS 192.168.1.1/24 1-65535 --scripts=smileyface.py
            nmap -PS 192.168.1.1 1-10000
            nmap -PS 192.168.1.1-192.168.1.255 1-10000
        :param command: the command string to run
        :return: dict mapping agent id -> command string for that agent
        :raises nmapError: when the command fails validation
        """
        command_list = command.split()
        if not self.validate_command(command_list):
            raise nmapError('Invalid command!\n Usage: nmap options hosts [ports]')
        ip_list = self.gather_hosts_from_cidr(command_list[self.HOST])
        # A single port "P" becomes the degenerate range P-P.
        port_list = command_list[self.PORT].split('-')
        port_start, port_stop = int(port_list[0]), int(port_list[-1])
        workload = self.divide_workload(ip_list, port_start, port_stop, command_list)
        return workload

    def gather_hosts_from_cidr(self, cidr_string):
        """Expand a single IPv4 address or CIDR block into a list of host strings.

        :raises nmapError: when the string is neither a bare IPv4 nor valid CIDR.
        """
        if len(cidr_string.split('/')) == 1 and re.match(r'^{}$'.format(self.IPV4_RE), cidr_string):
            return [cidr_string]
        elif len(cidr_string.split('/')) == 2 and re.match(self.CIDR_RE, cidr_string):
            return [ str(ip) for ip in list(ipaddress.ip_network(cidr_string).hosts())]
        else:
            raise nmapError('Invalid CIDR notation! Eg. 192.168.1.0/24')

    def divide_workload(self, ip_list, port_start, port_stop, command_list):
        """
        Divides the workload equally among the agents
        :param ip_list: a list of ip addresses
        :param port_start: first port of the requested range (inclusive)
        :param port_stop: last port of the requested range (inclusive)
        :param command_list: a list of the command being split by groups
        :return: a dictionary of commands that would be ran on the individual agents
        """
        split_by_ip = False
        ips_per_agent = 0
        ports_per_agent = 0
        if command_list[self.OPTION] == '-PR':
            # ARP scans are split by host range.
            total_ips = len(ip_list)
            # NOTE(review): integer division drops the remainder hosts/ports;
            # the last agent only absorbs them via the -1/port_stop fallback
            # in generate_agent_command -- verify distribution is as intended.
            ips_per_agent = total_ips // len(self.agent_list)
            split_by_ip = True
        else:
            # All other scans are split by port range.
            total_ports = port_stop - port_start + 1
            ports_per_agent = total_ports // len(self.agent_list)
        workload = {}
        for index in range(len(self.agent_list)):
            if split_by_ip:
                workload[self.agent_list[index]] = self.generate_agent_command(split_by_ip, index, ips_per_agent,
                                                                               command_list, ip_list=ip_list)
            else:
                workload[self.agent_list[index]] = self.generate_agent_command(split_by_ip, index, ports_per_agent,
                                                                               command_list, port_start=port_start,
                                                                               port_stop=port_stop)
        return workload

    def generate_agent_command(self, split_by_ip, index, no_per_agent, command_list, ip_list=None, port_start=None,
                               port_stop=None):
        """Rewrite the host (or port) token of command_list for agent #index
        and return the resulting command string.

        NOTE(review): mutates command_list in place across iterations, and
        consecutive agents' ranges share a boundary element (agent i ends at
        the index where agent i+1 starts) -- confirm both are intended.
        """
        if split_by_ip:
            start = index * no_per_agent
            end = ((index + 1) * no_per_agent) if ((index + 1) * no_per_agent) < len(ip_list) else -1
            command_list[self.HOST] = '{}-{}'.format(ip_list[start], ip_list[end])
            return ' '.join(command_list)
        else:
            start = port_start + (index * no_per_agent)
            end = (port_start + (index + 1) * no_per_agent) if ((index + 1) * no_per_agent) < (
                    port_stop - port_start) else port_stop
            command_list[self.PORT] = '{}-{}'.format(start, end)
            return ' '.join(command_list)
| StarcoderdataPython |
1793541 | <reponame>codescribblr/project-manager-django3<gh_stars>0
# Generated by Django 3.0.4 on 2020-03-19 17:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial wiring migration: adds the relational fields
    that connect accounts to users, notes, invites, groups and permissions."""

    initial = True

    dependencies = [
        ('accounts', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.AddField(
            model_name='accountuser',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='accountnote',
            name='account',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notes', to='accounts.Account'),
        ),
        migrations.AddField(
            model_name='accountinvite',
            name='account',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invites', to='accounts.Account'),
        ),
        migrations.AddField(
            model_name='account',
            name='groups',
            field=models.ManyToManyField(blank=True, help_text='The groups this account belongs to. An account will get all permissions granted to each of its groups.', to='accounts.Group', verbose_name='groups'),
        ),
        migrations.AddField(
            model_name='account',
            name='owner',
            field=models.ForeignKey(help_text='User who owns or administrates this account', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='account', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='account',
            name='permissions',
            field=models.ManyToManyField(blank=True, help_text='Specific permission for this account', to='accounts.Permission', verbose_name='permissions'),
        ),
        migrations.AddField(
            model_name='account',
            name='users',
            field=models.ManyToManyField(blank=True, help_text='Members of the company represented by this account.', related_name='accounts', through='accounts.AccountUser', to=settings.AUTH_USER_MODEL, verbose_name='users'),
        ),
    ]
| StarcoderdataPython |
74306 | <reponame>SykieChen/WeiboBlackList
import requests
class WeiboSession(requests.Session):
    """requests.Session subclass that logs in to Weibo's mobile SSO endpoint
    and then warms up the cross-domain cookies."""

    def __init__(self, username, password):
        # Credentials are held on the instance until login() is called.
        super(WeiboSession, self).__init__()
        self.__username = username
        self.__password = password

    def __del__(self):
        # Best-effort cleanup. NOTE(review): relying on __del__ for close()
        # is fragile; consider using the session as a context manager.
        self.close()

    def login(self):
        """POST the SSO login form; on success (retcode 20000000) visit every
        cross-domain URL returned so cookies propagate, then load weibo.cn."""
        loginURL = "http://passport.weibo.cn/sso/login"
        data = {
            "username": self.__username,
            "password": self.__password,
            "savestate": "1",
            "r": "http://m.weibo.cn/",
            "ec": "0",
            "entry": "mweibo",
            "mainpageflag": "1",
        }
        # The SSO endpoint requires a matching Referer header.
        self.headers.update({
            "Referer": "http://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=http%3A%2F%2Fm.weibo.cn%2F&sudaref=passport.weibo.cn&retcode=6102",
        })
        retJson = self.post(loginURL, data=data).json()
        if retJson["retcode"] == 20000000:
            for tmpURL in retJson["data"]["crossdomainlist"].values():
                self.get(tmpURL)
        myURL = "http://weibo.cn/"
        self.get(myURL)
if __name__ == "__main__":
    # NOTE(review): placeholder (empty) credentials, and login() is never
    # called here -- this only constructs the session.
    weibo = WeiboSession("", "")
| StarcoderdataPython |
41016 | from datetime import timedelta
import app_config
import dateutil.parser
from googleapiclient.discovery import build
from injector import inject
from models import AllDayCalendarEntry, CalendarEntry
from google_api import GoogleAuthenication
class GoogleCalendar:
    """Wrapper around the Google Calendar v3 API that flattens events in a
    date range into CalendarEntry / AllDayCalendarEntry models."""

    @inject
    def __init__(self, auth: GoogleAuthenication):
        # auth supplies the OAuth credentials used for every API call.
        self.auth = auth

    def query_calendar(self, start, end):
        """Return calendar entries between start and end (both 'YYYY-MM-DD').

        Timed events become one CalendarEntry each; all-day events are
        expanded into one AllDayCalendarEntry per day (end date exclusive,
        matching the Calendar API's all-day convention).
        """
        results = []
        creds = self.auth.creds
        service = build('calendar', 'v3', credentials=creds,
                        cache_discovery=False)
        # Bound the query to whole days using RFC3339 timestamps.
        time_min = start + "T00:00:00Z"
        time_max = end + "T23:59:59Z"
        events_result = service.events().list(calendarId='primary', timeMin=time_min, timeMax=time_max,
                                              singleEvents=True, showDeleted=False, timeZone=app_config.CALENDAR_TIMEZONE,
                                              orderBy='startTime').execute()
        events = events_result.get('items', [])
        for event in events:
            if 'dateTime' in event['start'] and 'dateTime' in event['end']:
                # Timed event: slice out the date (chars 0-10) and HH:MM (11-16).
                results.append(CalendarEntry(description=event['summary'], date=event['start']
                               ['dateTime'][0:10], time=event['start']['dateTime'][11:16], is_primary=False))
            elif 'date' in event['start'] and 'date' in event['end']:
                # All-day event: expand [start, end) into one entry per day.
                current = dateutil.parser.parse(
                    event['start']['date'])
                range_end = dateutil.parser.parse(
                    event['end']['date'])
                while current < range_end:
                    results.append(AllDayCalendarEntry(description=event['summary'], date=current.strftime(
                        '%Y-%m-%d'), is_primary=False))
                    current = current + timedelta(days=1)
        return results
| StarcoderdataPython |
1645682 | # https://www.hackerrank.com/challenges/python-lists/problem
# Number of list commands to process from stdin.
queries = int(input())
data = []
for _ in range(queries):
    # Each line: command name optionally followed by integer arguments.
    command, *parameter = input().split()
    parameters = list(map(int, parameter))
    if command == 'insert':
        data.insert(parameters[0], parameters[1])
    elif command == 'print':
        print(data)
    elif command == 'remove':
        data.remove(parameters[0])
    elif command == 'append':
        data.append(parameters[0])
    elif command == 'sort':
        data.sort()
    elif command == 'pop':
        data.pop()
    elif command == 'reverse':
data.reverse() | StarcoderdataPython |
3327489 | <filename>Phaedra/__main__.py
from Phaedra.Language import Mode, set_mode
from Phaedra.Secrets import get_secrets, set_secrets
# Load API secrets and select the remote language backend before importing
# the API module (which presumably reads this configuration on import).
set_secrets(get_secrets())
set_mode(Mode.REMOTE)

from Phaedra.API import run

# Start the Phaedra API.
run()
# from json import dump
# from Phaedra.Notebook import Notebook
# dump(
# Notebook.from_pdf(path="C:/Users/alenk/Desktop/MIT Portfolio.pdf").json(),
# open("C:/Users/alenk/Desktop/MIT Portfolio.json", "w"),
# )
| StarcoderdataPython |
113395 | <reponame>snwhd/pyretrommo
#!/usr/bin/env python3
# this file is auto-generated by gen_from_wiki.py
from __future__ import annotations
from typing import (
Dict,
Tuple,
)
from ..item import EquipmentItem
from .ability import Ability
from .equipment import find_equipment
from .player_class import PlayerClass
# Abilities available to each player class, mapped to an integer (presumably
# the level at which the ability is unlocked -- confirm against the wiki
# generator). Auto-generated by gen_from_wiki.py; do not edit by hand.
CLASS_ABILITIES: Dict[PlayerClass, Dict[Ability, int]] = {
    PlayerClass.Cleric: {
        Ability.Attack: 1,
        Ability.Heal: 1,
        Ability.HealWave: 10,
        Ability.Smite: 2,
        Ability.Pass: 1,
        Ability.Escape: 1,
    },
    PlayerClass.Warrior: {
        Ability.Attack: 1,
        Ability.Guard: 1,
        Ability.Pass: 1,
        Ability.Escape: 1,
    },
    PlayerClass.Wizard: {
        Ability.Attack: 1,
        Ability.Fireball: 1,
        Ability.Firewall: 4,
        Ability.Vitality: 5,
        Ability.Teleport: 7,
        Ability.Pass: 1,
        Ability.Escape: 1,
    },
}
# Equipment usable by each player class, mapped to an integer (presumably the
# level required to equip -- confirm against the wiki generator).
# Auto-generated by gen_from_wiki.py; do not edit by hand.
CLASS_EQUIPMENT: Dict[PlayerClass, Dict['EquipmentItem', int]] = {
    PlayerClass.Cleric: {
        find_equipment('TrainingWand'): 4,
        find_equipment('BoneBracelet'): 8,
        find_equipment('JaggedCrown'): 8,
        find_equipment('CrookedWand'): 6,
        find_equipment('CypressStick'): 1,
        find_equipment('LeatherCap'): 1,
        find_equipment('OakenClub'): 1,
        find_equipment('PaddedGarb'): 2,
        find_equipment('PlainClothes'): 1,
        find_equipment('RustyDagger'): 6,
        find_equipment('TatteredCloak'): 2,
        find_equipment('SimpleBracelet'): 1,
    },
    PlayerClass.Warrior: {
        find_equipment('CypressStick'): 1,
        find_equipment('DentedHelm'): 6,
        find_equipment('LeatherArmor'): 2,
        find_equipment('LeatherCap'): 1,
        find_equipment('OakenClub'): 1,
        find_equipment('PlainClothes'): 1,
        find_equipment('StuddedShield'): 6,
        find_equipment('TheTenderizer'): 8,
        find_equipment('TrainingSword'): 4,
        find_equipment('WoodenShield'): 1,
    },
    PlayerClass.Wizard: {
        find_equipment('BoneBracelet'): 8,
        find_equipment('CrookedWand'): 6,
        find_equipment('CypressStick'): 1,
        find_equipment('JaggedCrown'): 8,
        find_equipment('LeatherCap'): 1,
        find_equipment('MageHat'): 6,
        find_equipment('PlainClothes'): 1,
        find_equipment('SimpleBracelet'): 1,
        find_equipment('TatteredCloak'): 2,
        find_equipment('TrainingWand'): 4,
    },
}
| StarcoderdataPython |
def login(username, password):
    """Check username/password against the start.auth credential file.

    Returns:
        True when a matching "username:password" line exists, False otherwise
        (including when the auth file does not exist yet).
    """
    user_data = username + ":" + password + "\n"
    try:
        # Use a context manager so the file is always closed; the original
        # close() call was unreachable (placed after the returns).
        with open("start.auth", "r") as auth_data:
            for auth in auth_data:
                if user_data == auth:
                    print("Signed in")
                    return True
    except FileNotFoundError:
        print("Wrong Username or Password \n")
        return False
    print("Wrong Username or Password \n")
    return False
def signup(username, password) :
    """Append ``username:password`` to start.auth and print usage help.

    Always returns True; the file is created if it does not exist yet.
    """
    user_data = [username + ":" + password, '\n']
    # Context manager guarantees the handle is flushed and closed even
    # if writelines() raises.
    with open("start.auth", "a") as auth_data:
        auth_data.writelines(user_data)
    print("\nRegistration complete \n \nYou can START next by placing your username and password as arguement \ne.g $ python start.py [username] [password] \noptional parameter menu=[option]\n")
    return True
def check_user(username) :
    """Return True if *username* is available (not present in start.auth).

    A missing credential file means no users exist yet, so any name is
    available.  Prints a message when the name is already taken.
    """
    try:
        # 'with' closes the handle on all paths (the original never
        # closed it).
        with open("start.auth", "r") as auth_data:
            for auth in auth_data :
                auth = auth.replace("\n", "").split(":")
                if auth[0] == username :
                    print("username already exist \n")
                    return False
    except FileNotFoundError :
        return True
    # No line matched, so the name is free.  (The original returned
    # False for an empty file, wrongly reporting the name as taken.)
    return True
| StarcoderdataPython |
3250856 | <reponame>mwiens91/twitch-game-notify
"""Functions for processing and displaying notifications."""
import datetime
import logging
import time
import notify2
import requests
from twitchgamenotify.constants import HTTP_502_BAD_GATEWAY
from twitchgamenotify.twitch_api import FailedHttpRequest
from twitchgamenotify.version import NAME
# ANSI escape sequence for bold text
ANSI_BOLD = "\033[1m"
ANSI_END = "\033[0m"
def print_notification_to_terminal(streamer_name, stream_title, game_name):
    """Write a bolded, timestamped stream notification to stdout.

    Args:
        streamer_name: A string containing the name of the streamer.
        stream_title: A string containing the title of the stream.
        game_name: A string containing the name of the game.
    """
    timestamp = datetime.datetime.now().isoformat()
    print(ANSI_BOLD + streamer_name + ANSI_END + " @ " + timestamp)
    print("Streaming %s" % game_name)
    print("Title: %s" % stream_title)
def send_notification_to_dbus(streamer_name, stream_title, game_name):
    """Show a desktop notification for a live stream via notify2/D-Bus.

    Args:
        streamer_name: A string containing the name of the streamer.
        stream_title: A string containing the title of the stream.
        game_name: A string containing the name of the game.
    """
    summary = streamer_name + " @ " + time.strftime("%H:%M")
    body = "Streaming %s\nTitle: %s" % (game_name, stream_title)
    notify2.Notification(summary, body).show()
def send_error_notification(error_message, send_dbus_notification):
    """Log *error_message* and optionally mirror it to D-Bus.

    Args:
        error_message: A string containing the error message
        send_dbus_notification: A boolean specifying whether to send a
            notification to D-Bus.
    """
    logging.error(error_message)
    if not send_dbus_notification:
        return
    notify2.Notification(
        NAME + " @ " + time.strftime("%H:%M"), error_message
    ).show()
def send_connection_error_notification(send_dbus_notification, retry_seconds):
    """Report a failed Twitch connection and when the next retry happens.

    Args:
        send_dbus_notification: A boolean specifying whether to send a
            notification to D-Bus.
        retry_seconds: A string containing the number of seconds before
            the next connection attempt.
    """
    message = "Unable to connect to Twitch. Retrying in %ss" % retry_seconds
    send_error_notification(message, send_dbus_notification)
def send_authentication_error_notification(send_dbus_notification):
    """Report invalid Twitch API credentials (sent just before exiting).

    Arg:
        send_dbus_notification: A boolean specifying whether to send a
            notification to D-Bus.
    """
    send_error_notification(
        "Invalid authentication credentials for Twitch API. Exiting.",
        send_dbus_notification,
    )
def handle_failed_http_request(e, ignore_502s, print_to_terminal):
    """Report a failed HTTP request from the Twitch API, honouring 502 opt-out.

    Args:
        e: An exception of type FailedHttpRequest.
        ignore_502s: A boolean signaling whether to ignore 502 errors when
            querying the Twitch API.
        print_to_terminal: A boolean signalling whether to
            print to the terminal instead of passing a message to D-Bus.
    """
    # The error is suppressed only when 502s are ignored AND this is a 502.
    suppress = ignore_502s and e.status_code == HTTP_502_BAD_GATEWAY
    if not suppress:
        send_error_notification(e.message, not print_to_terminal)
def process_notifications_for_streamer(
    streamer_login_name,
    games,
    twitch_api,
    ignore_502s,
    streamers_previous_game,
    print_to_terminal,
):
    """Query the Twitch API for a specific streamer and display notifications.

    Args:
        ignore_502s: A boolean signaling whether to ignore 502 errors when
            querying the Twitch API.
        games: A dictionary containing information about what games to
            allow (or disallow) for the streamer. See the configuration
            file for how these look.
        print_to_terminal: A boolean signalling whether to
            print to the terminal instead of passing a message to D-Bus.
        streamer_login_name: A string containing the login name of the
            streamer to process notifications for.
        streamers_previous_game: A dictionary containing
            information about what game a streamer was last seen
            playing. The keys are strings containing the streamers
            login name, and the keys are strings containing the game ID
            of what they were last seen playing (or an empty string if
            the streamer hasn't yet been seen live). Can be None.
        twitch_api: An authenticated TwitchApi object to interact with
            Twitch's API.
    """
    try:
        # Get info about stream
        info = twitch_api.get_online_stream_info(streamer_login_name)
    except FailedHttpRequest as e:
        handle_failed_http_request(e, ignore_502s, print_to_terminal)
        return

    # If the streamer isn't live, record that they aren't playing
    # anything and move onto the next streamer
    if not info["live"]:
        # Mark them as last seen playing nothing
        if (
            streamers_previous_game
            and streamers_previous_game[streamer_login_name]
        ):
            streamers_previous_game[streamer_login_name] = ""
        return

    # Check if this is a game to notify about
    game_id = info["game_id"]
    game_name = info["game_name"]

    # If the streamer was last seen playing this game, move on. If
    # they are playing something new, record it.
    if streamers_previous_game:
        if streamers_previous_game[streamer_login_name] == game_id:
            # The streamer is playing the same game as before
            return

        # Streamer is playing something new. Update the previously
        # seen game.
        streamers_previous_game[streamer_login_name] = game_id

    # Check the include (and possibly exclude) list
    if "*" in games["include"]:
        # All games are included. Check if we need to exclude any
        # games.
        if "exclude" in games and (
            game_id in games["exclude"] or game_name in games["exclude"]
        ):
            return
    elif game_id not in games["include"] and game_name not in games["include"]:
        # Bug fix: this condition previously used `or`, which skipped
        # the notification unless BOTH the game ID and the game name
        # were listed.  A game may be included by either its ID or its
        # name, so we only skip when neither matches.
        return

    # Send a notification
    if print_to_terminal:
        print_notification_to_terminal(
            info["user_display_name"], info["title"], info["game_name"]
        )
    else:
        send_notification_to_dbus(
            info["user_display_name"], info["title"], info["game_name"]
        )
def process_notifications(
    streamers,
    twitch_api,
    ignore_502s,
    streamers_previous_game=None,
    print_to_terminal=False,
):
    """Query the Twitch API for every configured streamer and notify.

    Delegates the per-streamer work to
    ``process_notifications_for_streamer``.

    Args:
        ignore_502s: A boolean signaling whether to ignore 502 errors when
            querying the Twitch API.
        print_to_terminal: An optional boolean signalling whether to
            print to the terminal instead of passing a message to D-Bus.
            Defaults to False.
        streamers: A dictionary of streamers where the keys are strings
            containing the streamer's login name and the values are
            dictionaries containing the user's settings for the
            streamer. For example:

            {'shroud: {'include': ['Valorant']},
             'hasanabi': {'include': ['*'], 'exclude': ['Just Chatting']},
             'tyler1': {'include': ['*'], 'exclude': ['33214']}}
        streamers_previous_game: An optional dictionary mapping streamer
            login names to the game ID they were last seen playing (or
            an empty string if never seen live). Defaults to None, which
            is used when this function is only being called once.
        twitch_api: An authenticated TwitchApi object to interact with
            Twitch's API.
    """
    for login_name, games_settings in streamers.items():
        process_notifications_for_streamer(
            login_name,
            games_settings,
            twitch_api,
            ignore_502s,
            streamers_previous_game,
            print_to_terminal,
        )
def process_notifications_wrapper(*args, **kwargs):
    """A wrapper for process_notifications to catch connection errors.

    On a requests ConnectionError the error is reported (terminal or
    D-Bus) and the call returns; the caller's scheduler performs the
    next attempt.

    Fixes over the original:
    - ``kwargs.get(...)`` no longer raises KeyError when
      ``print_to_terminal`` is not passed (defaults to False, matching
      process_notifications).
    - The exponential-backoff bookkeeping was dead code: the counter was
      a local reset to 0 on every call, so the reported delay was always
      ``2 ** 1``.  We keep the observable behaviour (a 2s retry notice)
      without the misleading machinery; a persistent counter would be
      needed for real backoff.
    """
    # Determine whether to print to the terminal
    display_dbus_notification = not kwargs.get("print_to_terminal", False)

    try:
        process_notifications(*args, **kwargs)
    except requests.exceptions.ConnectionError:
        # Bad connection - stop this iteration and report the retry delay
        send_connection_error_notification(
            send_dbus_notification=display_dbus_notification,
            retry_seconds=2,
        )
| StarcoderdataPython |
1615663 | import sys
import socket
try:
import pika
except ImportError:
print("RabbitMQ test requested, but pika not installed. "
"Try 'pip install pika' and try again.")
sys.exit(1)
def rabbit_check(config):
    """Return True if a RabbitMQ broker accepts a connection and channel.

    Args:
        config: mapping with optional "host" (default "localhost") and
            "port" (default 5672) entries.
    """
    host = config.get("host", "localhost")
    port = int(config.get("port", 5672))
    params = pika.ConnectionParameters(host, port)
    try:
        conn = pika.BlockingConnection(params)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit still propagate; any AMQP/socket failure fails the
        # check.
        return False
    try:
        conn.channel()
        return True
    except Exception:
        return False
    finally:
        # The original leaked the connection on every path; close it.
        conn.close()
| StarcoderdataPython |
3345279 | <reponame>luck97/LP2_2s2017
# -*- coding: utf-8 -*-
# Exercícios by <NAME> (CodingBat)
# F. middle_way
# sejam duas listas de inteiros a e b
# retorna uma lista de tamanho 2 contendo os elementos do
# meio de a e b, suponha que as listas tem tamanho ímpar
# middle_way([1, 2, 3], [4, 5, 6]) -> [2, 5]
# middle_way([7, 7, 7], [3, 8, 0]) -> [7, 8]
# middle_way([5, 2, 9], [1, 4, 5]) -> [2, 4]
def middle_way(a, b):
    """Return a length-2 list with the middle elements of a and b.

    Both inputs are assumed to be length-3 (odd-length) lists, so the
    middle element is simply index 1.
    """
    return [a[1], b[1]]
def test_ex06():
    """Exercise middle_way against the CodingBat sample cases."""
    print ('middle_way')
    cases = [
        (([1, 2, 3], [4, 5, 6]), [2, 5]),
        (([7, 7, 7], [3, 8, 0]), [7, 8]),
        (([5, 2, 9], [1, 4, 5]), [2, 4]),
        (([1, 9, 7], [4, 8, 8]), [9, 8]),
        (([1, 2, 3], [3, 1, 4]), [2, 1]),
        (([1, 2, 3], [4, 1, 1]), [2, 1]),
    ]
    for (a, b), expected in cases:
        assert middle_way(a, b) == expected
| StarcoderdataPython |
3324627 | <reponame>ManishSahu53/wifi-finder
import numpy as np

from src import point, vector, line, source


def _climb_to_peak(detector, path, move_along, wifi_source,
                   max_iterations=100, step=1.0, tolerance=0.005):
    """Walk *detector* along *path* until the measured intensity peaks.

    ``move_along`` is the detector's bound movement method
    (``MoveAlongLine_xy`` or ``MoveAlongLine_yz``).  Whenever the
    intensity drops, the step is reversed and shrunk to -step/3; the
    walk stops once the relative intensity change falls below
    *tolerance* percent.

    Bug fix: the original four while-loops tested ``counter < 100`` but
    never incremented ``counter``, so a non-converging walk would spin
    forever.  The bounded for-loop enforces the intended cap.
    """
    previous = detector.Intensity(wifi_source)
    for _ in range(max_iterations):
        move_along(path, step)
        current = detector.Intensity(wifi_source)
        if current < previous:
            step = -step / 3
        if abs((current - previous) * 100 / previous) < tolerance:
            break
        previous = current


# Initializing Wifi source at a random 3D location, which we don't know.
sx, sy, sz = np.random.randint(0, 100, 1)[0], np.random.randint(0, 100, 1)[0], np.random.randint(0, 100, 1)[0]
wifi = source.Source(sx, sy, sz)

# Initializing our Detector at the origin.
initx, inity, initz = 0, 0, 0
start = point.Point(initx, inity, initz)
start.Intensity(wifi)

#################################### XY PLANE ###########################################
# Sweep along the line through (0, 0, 0) and (1, 1, 0).  At the intensity
# peak, the source lies on the perpendicular to the sweep line.
init_p1, init_p2 = point.Point(initx, inity, initz), point.Point(1, 1, initz)
init_line = line.Line(init_p1, init_p2, axis=2)
_climb_to_peak(start, init_line, start.MoveAlongLine_xy, wifi)
new_direction = init_line.GetPerpendicularFromPoint(start, axis=2)
print('Line towards WIfi Source, Slope: {}, Intercept: {}'.format(new_direction.slope_xy, new_direction.intercept_xy))

# Reset to the origin and sweep along the perpendicular to obtain a second
# line towards the source; the two lines intersect at its XY position.
start.Goto(initx, inity, initz)
_climb_to_peak(start, new_direction, start.MoveAlongLine_xy, wifi)
new_direction_2 = new_direction.GetPerpendicularFromPoint(start, axis=2)
wifi_prediction = line.Line.IntersectLine(new_direction, new_direction_2, axis=2)
px = round(wifi_prediction.x, 0)
py1 = round(wifi_prediction.y, 0)
print('2D Coordinate of Wifi Source, X: {}, Y: {}'.format(px, py1))

#################################### YZ PLANE ###########################################
# Repeat the same two-sweep triangulation in the YZ plane (axis=3).
init_p3 = point.Point(initx, 1, 1)
init_line_2 = line.Line(point.Point(initx, inity, initz), init_p3, axis=3)
_climb_to_peak(start, init_line_2, start.MoveAlongLine_yz, wifi)
new_direction = init_line_2.GetPerpendicularFromPoint(start, axis=3)
print('Line towards WIfi Source, Slope: {}, Intercept: {}'.format(new_direction.slope_yz, new_direction.intercept_yz))

start.Goto(initx, inity, initz)
_climb_to_peak(start, new_direction, start.MoveAlongLine_yz, wifi)
new_direction_2 = new_direction.GetPerpendicularFromPoint(start, axis=3)
wifi_prediction = line.Line.IntersectLine(new_direction, new_direction_2, axis=3)
py2 = round(wifi_prediction.y, 0)
pz = round(wifi_prediction.z, 0)

# Combine the planar estimates into 3D (Y was measured twice: average it).
print('3D Coordinate Estimated as: ')
print('X: {}, Y: {}, Z: {}'.format(px, (py1 + py2)/2, pz))
print('Original 3D coordinate of Wifi: X: {}, Y: {}, Z: {}'.format(sx, sy, sz))
3260631 | from hubcheck.pageobjects.po_generic_page import GenericPage
from hubcheck.pageobjects.basepageelement import Link
class ToolsPipelinePage(GenericPage):
    """Page object for the hub page listing all tool resources."""

    def __init__(self, browser, catalog):
        super(ToolsPipelinePage, self).__init__(browser, catalog)
        self.path = "/tools/pipeline"

        # Resolve the hub-specific helper classes from the catalog.
        locators_cls = self.load_class('ToolsPipelinePage_Locators')
        form_cls = self.load_class('ToolsPipelineSearchForm')

        # Merge the page locators, then build the search-form component.
        self.locators.update(locators_cls.locators)
        self.form = form_cls(self, {'base': 'form'})

    # Every public method below simply delegates to the search form.

    def search_for(self, terms):
        return self.form.search_for(terms)

    def goto_page_number(self, pagenumber):
        return self.form.goto_page_number(pagenumber)

    def goto_page_relative(self, relation):
        return self.form.goto_page_relative(relation)

    def get_caption_counts(self):
        return self.form.get_caption_counts()

    def get_pagination_counts(self):
        return self.form.get_pagination_counts()

    def get_current_page_number(self):
        return self.form.get_current_page_number()

    def get_link_page_numbers(self):
        return self.form.get_link_page_numbers()

    def search_result_rows(self):
        return self.form.search_result_rows()
class ToolsPipelinePage_Locators_Base(object):
    """locators for ToolsPipelinePage object"""

    # CSS locator for the search form inside the main content area.
    locators = {
        'form' : "css=.main form",
    }
| StarcoderdataPython |
4839206 | import logging
from pathlib import Path
from typing import Any, List, Optional, Tuple
import networkx
import osmnx
from more_itertools import pairwise
from networkx_astar_path import astar_path
from . import exceptions, models, weights
from .utils.debug import timeit
from .utils.graph import load_map
logger = logging.getLogger()
class Engine:
    """Routing engine built on an OSM street graph.

    Loads the graph once at construction time and answers path/route
    queries using pluggable edge-weight functions.
    """

    # Directed road-network graph loaded from the map file.
    graph: networkx.DiGraph

    @timeit
    def __init__(self, map_path: Path) -> None:
        """Load the road network from *map_path* into a directed graph."""
        logger.info("Initialise engine")
        self.graph = load_map(map_path)
        logger.info(
            f"Map loaded (edges: {len(self.graph.edges)}, nodes: {len(self.graph.nodes)})"
        )

    @timeit
    def find_path(
        self,
        origin: models.Node,
        destination: models.Node,
        weight: weights.WeightFunction,
    ) -> List[models.Node]:
        """
        Calculate a route using the given weight.

        Raises:
            exceptions.NodeDoesNotExist: if either endpoint is not a
                node of the loaded graph.
        """
        if origin.node_id not in self.graph.nodes:
            raise exceptions.NodeDoesNotExist(f"{origin} does not exists.")
        # Bug fix: this check previously re-tested *origin*, so a
        # missing destination was never reported here.
        if destination.node_id not in self.graph.nodes:
            raise exceptions.NodeDoesNotExist(f"{destination} does not exists.")

        def _weight_wrapper(
            graph: networkx.DiGraph,
            prev_edge_nodes: Optional[Tuple[Any, Any]],
            edge_nodes: Tuple[Any, Any],
        ) -> float:
            # Adapt the (prev_edge, edge) weight signature to the
            # node-pair callback that astar_path expects.
            prev_edge: Optional[models.Edge] = None
            if prev_edge_nodes:
                prev_edge = models.Edge.from_graph(self.graph, *prev_edge_nodes)
            edge = models.Edge.from_graph(self.graph, *edge_nodes)
            return weight(prev_edge, edge)

        logger.info(
            f"Calculating path from {origin.osm_id} to {destination.osm_id} with {weight}"
        )
        path = astar_path(
            self.graph, origin.node_id, destination.node_id, weight=_weight_wrapper
        )
        logger.info(f"Found path with {len(path)} items.")
        return [models.Node.from_graph(self.graph, node_id) for node_id in path]

    @timeit
    def route(
        self,
        origin: models.Location,
        destination: models.Location,
        weight_func: weights.WeightFunction,
        travel_time_func: weights.WeightFunction,
    ) -> models.Route:
        """
        Calculate a shortest path between two GPS locations.

        The endpoints are snapped to their nearest graph nodes before
        searching; costs, length and travel time are rounded to 2
        decimals in the returned Route.
        """
        origin_node = self.get_closest_node(origin)
        destination_node = self.get_closest_node(destination)
        path = self.find_path(origin_node, destination_node, weight_func)
        costs = self.costs_for_path(path, weight_func)
        length = self.length_of_path(path)
        travel_time = self.travel_time_of_path(path, func=travel_time_func)
        route = models.Route(
            costs=round(costs, 2),
            length=round(length, 2),
            travel_time=round(travel_time, 2),
            path=[models.Location(**node.dict()) for node in path],
        )
        return route

    @timeit
    def costs_for_path(
        self, path: List[models.Node], func: weights.WeightFunction
    ) -> float:
        """
        Calculate the costs for a given path.
        """
        edges = (
            models.Edge.from_nodes(self.graph, start, end)
            for start, end in pairwise(path)
        )
        # NOTE(review): for a two-node path (a single edge) the pairwise
        # loop below never runs and the cost comes out as 0 -- confirm
        # whether func(None, only_edge) is the intended result there.
        costs: float = 0
        for index, (prev_edge, edge) in enumerate(pairwise(edges)):
            if index == 0:
                costs = func(None, prev_edge)
            costs += func(prev_edge, edge)
        return costs

    @timeit
    def length_of_path(self, path: List[models.Node]) -> float:
        """
        Calculate the length of a given path (sum of edge lengths).
        """
        edges = (
            models.Edge.from_nodes(self.graph, start, end)
            for start, end in pairwise(path)
        )
        return sum(edge.length for edge in edges)

    @timeit
    def travel_time_of_path(
        self, path: List[models.Node], func: weights.WeightFunction
    ) -> float:
        """
        Calculate the travel time of a given path by accumulating *func*.
        """
        return self.costs_for_path(path, func)

    @timeit
    def get_closest_node(self, location: models.Location) -> models.Node:
        """
        Get the graph node closest to a GPS location.
        """
        node_id = osmnx.get_nearest_node(
            self.graph, (location.latitude, location.longitude)
        )
        node = models.Node.from_graph(self.graph, node_id)
        logger.info(f"Found closest node for {location} is {node.osm_id}")
        return node
| StarcoderdataPython |
3360098 | """
Test ABC parsing
"""
from pyabc2.parse import INFO_FIELDS, Tune
# Norbeck version
# http://www.norbeck.nu/abc/display.asp?rhythm=jig&ref=12
# Shared fixture for the tests below.  The ABC text must stay
# byte-identical: the tests parse its header fields and note body.
abc_have_a_drink = """
X:12
T:Have a Drink with Me
R:jig
D:Patrick Street 1.
Z:id:hn-jig-12
M:6/8
K:G
BAG E2D|EGD EGA|BAB GED|EAA ABc|BAG E2D|EGD EGA|BAB GED|EGG G3:|
|:GBd e2d|dgd B2A|GBd edB|cea ~a3|bag age|ged ege|dBG ABc|BGG G3:|
""".strip()
def test_simple_tune():
    """Header fields of the fixture tune parse into typed values."""
    from pyabc2.key import Key

    tune = Tune(abc_have_a_drink)
    assert tune.title == "Have a Drink with Me"
    assert tune.key == Key("G")
    assert tune.type == "jig"
def test_info_fields():
    """The 'T' info field is registered as the tune-title field."""
    title_field = INFO_FIELDS["T"]
    assert title_field.name == "tune title"
def test_repeats_no_endings():
    """Plain |: ... :| repeats expand each repeated section twice."""
    abc = """
T:?
L:1
M:4/4
R:reel
K:G
G | A :|
|: B | C :|
"""
    # TODO: maybe should be a warning if 2nd `:|` found but not a `|:`
    tune = Tune(abc)
    expanded = " ".join(note.class_name for note in tune.iter_notes())
    assert expanded == "G A G A B C B C"
def test_repeats_with_endings():
    """Repeats with |1 / |2 endings play the right ending on each pass."""
    abc = """
T:?
L:1
M:4/4
R:reel
K:G
G |1 A | A :|2 a | a ||
|: B |1 C :|2 c ||
"""
    tune = Tune(abc)
    expanded = " ".join(note.to_abc() for note in tune.iter_notes())
    assert expanded == "G A A G a a B C B c"
def test_header_multiple_field_instances():
    """Repeated header fields: titles accumulate, notes keep the first."""
    abc = """
T: hi
T: hii
N: note1
N:note2
""".strip()
    tune = Tune(abc)
    assert tune.titles == ["hi", "hii"]
    assert tune.header["notes"] == "note1"
| StarcoderdataPython |
109268 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com <EMAIL>
from json import loads, dumps
import hashlib
import pylibmc
from thumbor.storages import BaseStorage
from thumbor.utils import logger
from tornado.concurrent import return_future
class Storage(BaseStorage):
    """Thumbor storage backend keeping images and metadata in memcached.

    Keys are hex SHA-1 digests of the image path (with a namespace
    prefix for crypto/detector entries) so arbitrary paths fit
    memcached key rules.
    """

    def __init__(self, context):
        BaseStorage.__init__(self, context)
        self.storage = pylibmc.Client(
            self.context.config.MEMCACHE_STORAGE_SERVERS,
            binary=True,
            behaviors={
                "tcp_nodelay": True,
                'no_block': True,
                "ketama": True
            }
        )

    def get_hash(self, msg):
        """Return the hex SHA-1 digest of *msg* (UTF-8, lossy encode)."""
        msg = msg.encode('utf-8', 'replace')
        return hashlib.sha1(msg).hexdigest()

    def key_for(self, url):
        """Memcached key under which the image bytes of *url* live."""
        return self.get_hash(url)

    def crypto_key_for(self, url):
        """Memcached key for the security key stored for *url*."""
        return self.get_hash('thumbor-crypto-%s' % url)

    def detector_key_for(self, url):
        """Memcached key for detector (focal point) data of *url*."""
        return self.get_hash('thumbor-detector-%s' % url)

    def put(self, path, contents):
        """Store image *contents*; failures are logged, never raised."""
        key = self.key_for(path)
        try:
            self.storage.set(key, contents, time=self.context.config.STORAGE_EXPIRATION_SECONDS)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            logger.exception("[MEMCACHED] failed to set key '{0}'".format(key))
        return path

    def put_crypto(self, path):
        """Store the server security key for *path*, when configured."""
        if not self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
            return

        if not self.context.server.security_key:
            raise RuntimeError("STORES_CRYPTO_KEY_FOR_EACH_IMAGE can't be True if no SECURITY_KEY specified")

        key = self.crypto_key_for(path)
        try:
            self.storage.set(key, self.context.server.security_key)
        except Exception:
            logger.exception("[MEMCACHED] failed to set key '{0}'".format(key))
        return key

    def put_detector_data(self, path, data):
        """Store detector *data* for *path* as JSON."""
        key = self.detector_key_for(path)
        try:
            self.storage.set(key, dumps(data))
        except Exception:
            logger.exception("[MEMCACHED] failed to set key '{0}'".format(key))
        return key

    @return_future
    def get_crypto(self, path, callback):
        """Fetch the stored security key for *path* (None if absent)."""
        if not self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
            callback(None)
            return

        crypto = self.storage.get(self.crypto_key_for(path))
        callback(crypto if crypto else None)

    @return_future
    def get_detector_data(self, path, callback):
        """Fetch and JSON-decode detector data for *path* (None if absent)."""
        data = self.storage.get(self.detector_key_for(path))
        callback(loads(data) if data else None)

    @return_future
    def exists(self, path, callback):
        """Report whether an entry exists for *path*."""
        callback(self.storage.get(self.key_for(path)) is not None)

    def remove(self, path):
        """Delete the entry for *path* if it exists."""
        # NOTE(review): exists() is wrapped by @return_future, so this
        # blocks on the returned future via .result() -- confirm this is
        # safe on the running IOLoop.
        if not self.exists(path).result():
            return
        return self.storage.delete(self.key_for(path))

    @return_future
    def get(self, path, callback):
        """Fetch the stored image bytes for *path* (None if absent)."""
        callback(self.storage.get(self.key_for(path)))
| StarcoderdataPython |
1719550 | import torch
from .registry import DATASETS
from .base import BaseDataset
def rotate(img):
    """Return the four 90-degree rotations of *img*.

    img: Tensor laid out as (C, H, W).  Element 0 is the unrotated
    input; elements 1-3 are successive quarter turns in the HW plane.
    """
    quarter_turn = torch.flip(img.transpose(1, 2), [1])
    half_turn = torch.flip(img, [1, 2])
    three_quarter_turn = torch.flip(img, [1]).transpose(1, 2)
    return [img, quarter_turn, half_turn, three_quarter_turn]
@DATASETS.register_module
class RotationPredDataset(BaseDataset):
    """Dataset for rotation prediction (RotNet-style self-supervision).

    Each sample stacks the pipeline-transformed image with its three
    90-degree rotations and pairs them with rotation labels 0..3.
    """

    def __init__(self, data_source, pipeline):
        super(RotationPredDataset, self).__init__(data_source, pipeline)

    def __getitem__(self, idx):
        img = self.data_source.get_sample(idx)
        img = self.pipeline(img)
        # Stack the 4 rotations along a new leading dim: (4, C, H, W).
        img = torch.stack(rotate(img), dim=0)
        rotation_labels = torch.LongTensor([0, 1, 2, 3])
        return dict(img=img, rot_label=rotation_labels)

    def evaluate(self, scores, keyword, logger=None):
        # Bug fix: ``raise NotImplemented`` raised a TypeError because
        # NotImplemented is a sentinel value, not an exception class.
        raise NotImplementedError
| StarcoderdataPython |
1789817 | <reponame>amaurirg/Web2Py
# -*- coding: utf-8 -*-
#########################################################################
## This scaffolding model makes your app work on Google App Engine too
#########################################################################
# Pick the database backend.  web2py injects ``request`` and DAL into
# model files at runtime.
if request.env.web2py_runtime_gae: # if running on Google App Engine
    db = DAL('gae') # connect to Google BigTable
    session.connect(request, response, db=db) # and store sessions and tickets there
    ### or use the following lines to store sessions in Memcache
    # from gluon.contrib.memdb import MEMDB
    # from google.appengine.api.memcache import Client
    # session.connect(request, response, db=MEMDB(Client()))
else: # else use a normal relational database
    db = DAL('sqlite://storage.sqlite') # if not, use SQLite or other DB
## if no need for session
# session.forget()
#########################################################################
## Here is sample code if you need for
## - email capabilities
## - authentication (registration, login, logout, ... )
## - authorization (role based authorization)
## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
## - crud actions
## comment/uncomment as needed
from gluon.tools import *
auth=Auth(globals(),db) # authentication/authorization
crud=Crud(globals(),db) # for CRUD helpers using auth
service=Service(globals()) # for json, xml, jsonrpc, xmlrpc, amfrpc
# mail=Mail() # mailer
# mail.settings.server='smtp.gmail.com:587' # your SMTP server
# mail.settings.sender='<EMAIL>' # your email
# mail.settings.login='username:password' # your credentials or None
# NOTE(review): the HMAC key is hard-coded in the model; for anything
# beyond a demo it should live outside version control.
auth.settings.hmac_key='sha512:ffc25cdd-3ed0-4e22-9cd2-f8e914c9222d'
auth.define_tables() # creates all needed tables
# Demo schema: state -> city -> zipcode hierarchy referenced by 'things'.
db.define_table('state',
    Field('name'),format='%(name)s')
db.define_table('city',
    Field('name'),
    Field('state',db.state),format='%(name)s')
db.define_table('zipcode',
    Field('value'),
    Field('city',db.city),format='%(value)s')
db.define_table('things',Field('name'),
    Field('created_on','date'),
    Field('value','integer',default=100),
    Field('location',db.zipcode),
    Field('rating',requires = IS_IN_SET(range(0,5)), default=1),
    Field('stuff', requires = IS_IN_SET(['Apples','Oranges','Bananas','Kiwis','Lemons'],multiple=True)))
# NOTE(review): web2py model files execute on EVERY request, so the
# truncate/insert/populate block below wipes and re-seeds these tables
# on each hit -- confirm this is intentional demo-only scaffolding.
db.state.truncate()
db.city.truncate()
db.zipcode.truncate()
db.things.truncate()
db.state.insert(name='Texas')
db.state.insert(name='Illinois')
db.state.insert(name='California')
db.city.insert(name='Austin',state=1)
db.city.insert(name='Dallas',state=1)
db.city.insert(name='Chicago',state=2)
db.city.insert(name='Aurora',state=2)
db.city.insert(name='Los Angeles',state=3)
db.city.insert(name='San Diego',state=3)
db.zipcode.insert(value='78704',city=1)
db.zipcode.insert(value='78745',city=1)
db.zipcode.insert(value='75001',city=2)
db.zipcode.insert(value='75038',city=2)
db.zipcode.insert(value='60606',city=3)
db.zipcode.insert(value='60607',city=3)
db.zipcode.insert(value='60504',city=4)
db.zipcode.insert(value='60505',city=4)
db.zipcode.insert(value='90005',city=5)
db.zipcode.insert(value='90006',city=5)
db.zipcode.insert(value='92101',city=6)
db.zipcode.insert(value='92102',city=6)
# Seed 20 random rows into 'things' for the demo UI.
from gluon.contrib.populate import populate
populate(db.things,20)
| StarcoderdataPython |
1757204 | #!/usr/bin/python
#-*- coding: utf-8 -*-
# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.
# Licensed under the Apache License, Version 2.0 (the "License")
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# --- File Name: hd_networks_stylegan2.py
# --- Creation Date: 22-04-2020
# --- Last Modified: Thu 23 Apr 2020 23:31:39 AEST
# --- Author: <NAME>
# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<
"""
StyleGAN2-like networks used in HD models.
"""
import numpy as np
import tensorflow as tf
# from training.networks_stylegan2 import dense_layer
# from training.networks_stylegan2 import apply_bias_act
import training.networks_stylegan2 as networks_stylegan2
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.ops.upfirdn_2d import upsample_2d, downsample_2d, upsample_conv_2d, conv_downsample_2d
#----------------------------------------------------------------------------
# Main generator network.
# Composed of two sub-networks (mapping and synthesis) that are defined below.
# Used in configs B-F (Table 1).
def G_main(
    latents_in,                                         # First input: Latent vectors (Z) [minibatch, latent_size].
    labels_in,                                          # Second input: Conditioning labels [minibatch, label_size].
    truncation_psi          = 0.5,                      # Style strength multiplier for the truncation trick. None = disable.
    truncation_cutoff       = None,                     # Number of layers for which to apply the truncation trick. None = disable.
    truncation_psi_val      = None,                     # Value for truncation_psi to use during validation.
    truncation_cutoff_val   = None,                     # Value for truncation_cutoff to use during validation.
    dlatent_avg_beta        = 0.995,                    # Decay for tracking the moving average of W during training. None = disable.
    style_mixing_prob       = 0.9,                      # Probability of mixing styles during training. None = disable.
    is_training             = False,                    # Network is under training? Enables and disables specific features.
    is_validation           = False,                    # Network is under validation? Chooses which value to use for truncation_psi.
    return_dlatents         = False,                    # Return dlatents in addition to the images?
    is_template_graph       = False,                    # True = template graph constructed by the Network class, False = actual evaluation.
    components              = dnnlib.EasyDict(),        # Container for sub-networks. Retained between calls.
    mapping_func            = 'G_mapping',              # Build func name for the mapping network.
    synthesis_func          = 'G_synthesis_stylegan2',  # Build func name for the synthesis network.
    **kwargs):                                          # Arguments for sub-networks (mapping and synthesis).
    """Top-level generator: mapping network + W-space tricks + synthesis network.

    Runs the mapping network on Z, then (training only) tracks the moving
    average of W and applies style-mixing regularization, then (inference
    only) applies the truncation trick, and finally runs the synthesis
    network on the resulting per-layer dlatents.

    Returns the generated images, plus the dlatents when return_dlatents=True.
    """
    # Validate arguments.
    assert not is_training or not is_validation
    assert isinstance(components, dnnlib.EasyDict)
    if is_validation:
        truncation_psi = truncation_psi_val
        truncation_cutoff = truncation_cutoff_val
    # Truncation is disabled during training, or when psi is a literal 1.0 (identity).
    if is_training or (truncation_psi is not None and not tflib.is_tf_expression(truncation_psi) and truncation_psi == 1):
        truncation_psi = None
    if is_training:
        truncation_cutoff = None
    # W-average tracking and style mixing are training-only features.
    if not is_training or (dlatent_avg_beta is not None and not tflib.is_tf_expression(dlatent_avg_beta) and dlatent_avg_beta == 1):
        dlatent_avg_beta = None
    if not is_training or (style_mixing_prob is not None and not tflib.is_tf_expression(style_mixing_prob) and style_mixing_prob <= 0):
        style_mixing_prob = None

    # Setup components. Sub-networks are cached in `components` between calls.
    if 'synthesis' not in components:
        components.synthesis = tflib.Network('G_synthesis', func_name=globals()[synthesis_func], **kwargs)
    num_layers = components.synthesis.input_shape[1]
    dlatent_size = components.synthesis.input_shape[2]
    if 'mapping' not in components:
        components.mapping = tflib.Network('G_mapping', func_name=globals()[mapping_func], dlatent_broadcast=num_layers, **kwargs)

    # Setup variables.
    lod_in = tf.get_variable('lod', initializer=np.float32(0), trainable=False)
    dlatent_avg = tf.get_variable('dlatent_avg', shape=[dlatent_size], initializer=tf.initializers.zeros(), trainable=False)

    # Evaluate mapping network.
    dlatents = components.mapping.get_output_for(latents_in, labels_in, is_training=is_training, **kwargs)
    dlatents = tf.cast(dlatents, tf.float32)

    # Update moving average of W (used later as the truncation anchor).
    if dlatent_avg_beta is not None:
        with tf.variable_scope('DlatentAvg'):
            batch_avg = tf.reduce_mean(dlatents[:, 0], axis=0)
            update_op = tf.assign(dlatent_avg, tflib.lerp(batch_avg, dlatent_avg, dlatent_avg_beta))
            with tf.control_dependencies([update_op]):
                dlatents = tf.identity(dlatents)

    # Perform style mixing regularization.
    if style_mixing_prob is not None:
        with tf.variable_scope('StyleMix'):
            latents2 = tf.random_normal(tf.shape(latents_in))
            dlatents2 = components.mapping.get_output_for(latents2, labels_in, is_training=is_training, **kwargs)
            dlatents2 = tf.cast(dlatents2, tf.float32)
            layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
            # Only layers active at the current level-of-detail can be mixed.
            cur_layers = num_layers - tf.cast(lod_in, tf.int32) * 2
            mixing_cutoff = tf.cond(
                tf.random_uniform([], 0.0, 1.0) < style_mixing_prob,
                lambda: tf.random_uniform([], 1, cur_layers, dtype=tf.int32),
                lambda: cur_layers)
            dlatents = tf.where(tf.broadcast_to(layer_idx < mixing_cutoff, tf.shape(dlatents)), dlatents, dlatents2)

    # Apply truncation trick: lerp each layer's W toward the moving average.
    if truncation_psi is not None:
        with tf.variable_scope('Truncation'):
            layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
            layer_psi = np.ones(layer_idx.shape, dtype=np.float32)
            if truncation_cutoff is None:
                layer_psi *= truncation_psi
            else:
                layer_psi = tf.where(layer_idx < truncation_cutoff, layer_psi * truncation_psi, layer_psi)
            dlatents = tflib.lerp(dlatent_avg, dlatents, layer_psi)

    # Evaluate synthesis network (propagating the current lod first, if any).
    deps = []
    if 'lod' in components.synthesis.vars:
        deps.append(tf.assign(components.synthesis.vars['lod'], lod_in))
    with tf.control_dependencies(deps):
        images_out = components.synthesis.get_output_for(dlatents, is_training=is_training, force_clean_graph=is_template_graph, **kwargs)

    # Return requested outputs.
    images_out = tf.identity(images_out, name='images_out')
    if return_dlatents:
        return images_out, dlatents
    return images_out
#----------------------------------------------------------------------------
# G_mapping with hd_dis network.
def G_mapping_hd_dis_to_dlatent(
    latents_in,                             # First input: Latent vectors (Z) [minibatch, latent_size].
    labels_in,                              # Second input: Conditioning labels [minibatch, label_size].
    latent_size             = 512,          # Latent vector (Z) dimensionality.
    label_size              = 0,            # Label dimensionality, 0 if no labels.
    dlatent_size            = 512,          # Disentangled latent (W) dimensionality.
    dlatent_broadcast       = None,         # Output disentangled latent (W) as [minibatch, dlatent_size] or [minibatch, dlatent_broadcast, dlatent_size].
    mapping_layers          = 8,            # Number of mapping layers.
    mapping_fmaps           = 512,          # Number of activations in the mapping layers.
    mapping_lrmul           = 0.01,         # Learning rate multiplier for the mapping layers.
    mapping_nonlinearity    = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
    normalize_latents       = True,         # Normalize latent vectors (Z) before feeding them to the mapping layers?
    dtype                   = 'float32',    # Data type to use for activations and outputs.
    **_kwargs):                             # Ignore unrecognized keyword args.
    """Mapping network that routes disjoint slices of Z to the synthesis layers.

    Instead of the usual MLP, Z is split into `dlatent_broadcast` contiguous
    segments, one per synthesis layer: segment i becomes the dlatent fed to
    layer i. latent_size must be divisible by dlatent_broadcast so that all
    slices have equal width (a requirement of tf.stack below).

    Returns:
        dlatents of shape [minibatch, dlatent_broadcast, latent_size // dlatent_broadcast],
        named 'dlatents_out'.
    """
    act = mapping_nonlinearity

    # Inputs.
    latents_in.set_shape([None, latent_size])
    labels_in.set_shape([None, label_size])
    latents_in = tf.cast(latents_in, dtype)
    labels_in = tf.cast(labels_in, dtype)
    x = latents_in

    # Embed labels and concatenate them with latents.
    if label_size:
        with tf.variable_scope('LabelConcat'):
            w = tf.get_variable('weight', shape=[label_size, latent_size], initializer=tf.initializers.random_normal())
            y = tf.matmul(labels_in, tf.cast(w, dtype))
            x = tf.concat([x, y], axis=1)

    # Validate the split up front instead of failing inside tf.stack with a
    # cryptic shape error (and instead of the leftover debug print).
    if dlatent_broadcast is None:
        raise ValueError('dlatent_broadcast must be set to the number of synthesis layers.')
    seg = latent_size // dlatent_broadcast
    remain = latent_size % dlatent_broadcast
    if remain != 0:
        raise ValueError('latent_size (%d) must be divisible by dlatent_broadcast (%d).'
                         % (latent_size, dlatent_broadcast))

    # One contiguous slice of Z per synthesis layer. The variable scopes hold
    # no variables but are kept for graph-name compatibility.
    x_list = []
    for syn_layer_idx in range(dlatent_broadcast):
        with tf.variable_scope('Dis_to_dlatent%d' % syn_layer_idx):
            with tf.variable_scope('to_dlatent'):
                x_list.append(x[:, syn_layer_idx * seg:(syn_layer_idx + 1) * seg])
    x = tf.stack(x_list, axis=1)  # [minibatch, dlatent_broadcast, seg]

    # Output.
    assert x.dtype == tf.as_dtype(dtype)
    return tf.identity(x, name='dlatents_out')
#----------------------------------------------------------------------------
# StyleGAN synthesis network with revised architecture (Figure 2d).
# Implements progressive growing, but no skip connections or residual nets (Figure 7).
# Used in configs B-D (Table 1).
def G_synthesis_stylegan_revised_hd(
    dlatents_in,                        # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
    dlatent_size        = 512,          # Disentangled latent (W) dimensionality.
    num_channels        = 3,            # Number of output color channels.
    resolution          = 1024,         # Output resolution.
    fmap_base           = 16 << 10,     # Overall multiplier for the number of feature maps.
    fmap_decay          = 1.0,          # log2 feature map reduction when doubling the resolution.
    fmap_min            = 1,            # Minimum number of feature maps in any layer.
    fmap_max            = 512,          # Maximum number of feature maps in any layer.
    randomize_noise     = True,         # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
    nonlinearity        = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
    dtype               = 'float32',    # Data type to use for activations and outputs.
    resample_kernel     = [1,3,3,1],    # Low-pass filter to apply when resampling activations. None = no filtering.
    fused_modconv       = True,         # Implement modulated_conv2d_layer() as a single fused op?
    structure           = 'auto',       # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
    is_template_graph   = False,        # True = template graph constructed by the Network class, False = actual evaluation.
    force_clean_graph   = False,        # True = construct a clean graph that looks nice in TensorBoard, False = default behavior.
    **_kwargs):                         # Ignore unrecognized keyword args.
    """StyleGAN synthesis network (revised architecture) with progressive growing.

    Maps per-layer dlatents W to an RGB image, growing from a learned 4x4
    constant through modulated convolutions up to `resolution`.
    NOTE(review): unlike stock StyleGAN2, noise is injected only for
    layer_idx > num_layers * 0.75, i.e. the last quarter of the layer stack.
    """
    resolution_log2 = int(np.log2(resolution))
    assert resolution == 2**resolution_log2 and resolution >= 4
    # Feature-map count for a given resolution stage, clipped to [fmap_min, fmap_max].
    def nf(stage): return np.clip(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min, fmap_max)
    if is_template_graph: force_clean_graph = True
    if force_clean_graph: randomize_noise = False
    if structure == 'auto': structure = 'linear' if force_clean_graph else 'recursive'
    act = nonlinearity
    num_layers = resolution_log2 * 2 - 2
    images_out = None

    # Primary inputs.
    dlatents_in.set_shape([None, num_layers, dlatent_size])
    dlatents_in = tf.cast(dlatents_in, dtype)
    lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype)

    # Noise inputs (one per conv layer, stored as non-trainable variables).
    noise_inputs = []
    for layer_idx in range(num_layers - 1):
        res = (layer_idx + 5) // 2
        shape = [1, 1, 2**res, 2**res]
        noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False))

    # Single convolution layer with all the bells and whistles.
    def layer(x, layer_idx, fmaps, kernel, up=False):
        x = networks_stylegan2.modulated_conv2d_layer(x, dlatents_in[:, layer_idx], fmaps=fmaps, kernel=kernel, up=up, resample_kernel=resample_kernel, fused_modconv=fused_modconv)
        # HD variant: noise injection restricted to the last quarter of the layers.
        if layer_idx > num_layers * 0.75:
            if randomize_noise:
                noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype)
            else:
                noise = tf.cast(noise_inputs[layer_idx], x.dtype)
            noise_strength = tf.get_variable('noise_strength', shape=[], initializer=tf.initializers.zeros())
            x += noise * tf.cast(noise_strength, x.dtype)
        return networks_stylegan2.apply_bias_act(x, act=act)

    # Early layers: learned 4x4 constant tiled over the minibatch, then one conv.
    with tf.variable_scope('4x4'):
        with tf.variable_scope('Const'):
            x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.random_normal())
            x = tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1])
        with tf.variable_scope('Conv'):
            x = layer(x, layer_idx=0, fmaps=nf(1), kernel=3)

    # Building blocks for remaining layers.
    def block(res, x): # res = 3..resolution_log2
        with tf.variable_scope('%dx%d' % (2**res, 2**res)):
            with tf.variable_scope('Conv0_up'):
                x = layer(x, layer_idx=res*2-5, fmaps=nf(res-1), kernel=3, up=True)
            with tf.variable_scope('Conv1'):
                x = layer(x, layer_idx=res*2-4, fmaps=nf(res-1), kernel=3)
            return x
    def torgb(res, x): # res = 2..resolution_log2
        with tf.variable_scope('ToRGB_lod%d' % (resolution_log2 - res)):
            return networks_stylegan2.apply_bias_act(networks_stylegan2.modulated_conv2d_layer(x, dlatents_in[:, res*2-3], fmaps=num_channels, kernel=1, demodulate=False, fused_modconv=fused_modconv))

    # Fixed structure: simple and efficient, but does not support progressive growing.
    if structure == 'fixed':
        for res in range(3, resolution_log2 + 1):
            x = block(res, x)
        images_out = torgb(resolution_log2, x)

    # Linear structure: simple but inefficient.
    if structure == 'linear':
        images_out = torgb(2, x)
        for res in range(3, resolution_log2 + 1):
            lod = resolution_log2 - res
            x = block(res, x)
            img = torgb(res, x)
            with tf.variable_scope('Upsample_lod%d' % lod):
                images_out = upsample_2d(images_out)
            with tf.variable_scope('Grow_lod%d' % lod):
                images_out = tflib.lerp_clip(img, images_out, lod_in - lod)

    # Recursive structure: complex but efficient.
    if structure == 'recursive':
        def cset(cur_lambda, new_cond, new_lambda):
            return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
        def grow(x, res, lod):
            y = block(res, x)
            img = lambda: networks_stylegan2.naive_upsample_2d(torgb(res, y), factor=2**lod)
            img = cset(img, (lod_in > lod), lambda: networks_stylegan2.naive_upsample_2d(tflib.lerp(torgb(res, y), upsample_2d(torgb(res - 1, x)), lod_in - lod), factor=2**lod))
            if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1))
            return img()
        images_out = grow(x, 3, resolution_log2 - 3)

    assert images_out.dtype == tf.as_dtype(dtype)
    return tf.identity(images_out, name='images_out')
| StarcoderdataPython |
1780078 | #unit test script to test sum module under my_sum
import sys
sys.path.append('/home/user/workarea/projects/learn-pyspark/jobs/samples/')
import unittest
from calc import basic
class TestSum(unittest.TestCase):
    """Unit tests for basic.sum."""

    def test_list_int(self):
        """basic.sum adds up a list of integers."""
        self.assertEqual(basic.sum([1, 2, 3]), 6)

    def test_list_str(self):
        """basic.sum concatenates a list of strings."""
        self.assertEqual(basic.sum(['ina', 'mina', 'dica']), 'inaminadica')
class TestSubstract(unittest.TestCase):
    """Unit tests for basic.substract."""

    def test_sub_int(self):
        """basic.substract returns the difference of two integers."""
        difference = basic.substract(5, 2)
        self.assertEqual(difference, 3)
if __name__=='__main__':
unittest.main()
| StarcoderdataPython |
4830413 | <gh_stars>0
import random
def play():
    """Run a three-attempt console game: guess the secret number (1..100).

    An out-of-range or non-numeric entry consumes the attempt (same flow as
    before). Prints the outcome; returns None.
    """
    print("Welcome to my game")
    # NOTE(review): fixed value; the unused `import random` at module top
    # suggests a random secret was intended — confirm before changing.
    secret_number = 42
    total_attempts = 3
    won = False
    for attempt in range(1, total_attempts + 1):
        # Fix: the message used to read "You have {attempt} attemps the {total}".
        print("Attempt {} of {}".format(attempt, total_attempts))
        chute_str = input("Digite one number between 1 and 100: ")
        print("You write ", chute_str)
        try:
            chute = int(chute_str)
        except ValueError:
            # Fix: a non-numeric entry used to crash the game with ValueError.
            print("you need write one number between 1 and 100: ")
            continue
        if chute < 1 or chute > 100:
            print("you need write one number between 1 and 100: ")
            continue
        if chute == secret_number:
            print("All right !!!")
            won = True
            break
        elif chute > secret_number:
            print("You lose ! Your choise is taller than secret number")
        else:
            print("You lose ! Your choise is samller than secret number")
    if not won:
        # Fix: "GAME OVER" used to print even after a winning guess.
        print("GAME OVER")
3037 | import platform
import shutil
import tempfile
import warnings
from pathlib import Path
import requests
from tqdm import tqdm
DOCKER_VERSION = "20.10.5"
BUILDX_VERSION = "0.5.1"
CACHE_DIR = Path.home() / ".cache" / "python-on-whales"
TEMPLATE_CLI = (
"https://download.docker.com/{os}/static/stable/{arch}/docker-{version}.tgz"
)
WINDOWS_CLI_URL = "https://github.com/StefanScherer/docker-cli-builder/releases/download/{version}/docker.exe"
def get_docker_binary_path_in_cache():
    """Return the cached CLI path: <CACHE_DIR>/docker-cli/<DOCKER_VERSION>/docker."""
    return CACHE_DIR.joinpath("docker-cli", DOCKER_VERSION, "docker")
def get_docker_cli_url():
    """Build the download URL for the docker CLI matching the current OS."""
    current_os = get_user_os()
    if current_os == "windows":
        # Windows builds come from a separate community repository.
        return WINDOWS_CLI_URL.format(version=DOCKER_VERSION)
    return TEMPLATE_CLI.format(
        os=current_os,
        arch=get_arch_for_docker_cli_url(),
        version=DOCKER_VERSION,
    )
def download_docker_cli():
    """Download the docker CLI archive and install the binary into the local cache.

    Downloads into a temporary directory, extracts/moves the binary into
    get_docker_binary_path_in_cache(), then warns the user where it was put.
    """
    file_to_download = get_docker_cli_url()
    extension = file_to_download.split(".")[-1]  # "tgz" on linux/mac, "exe" on windows

    with tempfile.TemporaryDirectory() as tmp_dir:
        tmp_dir = Path(tmp_dir)
        downloaded_file_path = tmp_dir / f"docker.{extension}"
        download_from_url(file_to_download, downloaded_file_path)
        docker_binary_path = get_docker_binary_path_in_cache()
        docker_binary_path.parent.mkdir(exist_ok=True, parents=True)
        if extension == "tgz":
            # The tarball contains a "docker/" directory; extract it and move
            # only the binary into the cache.
            extract_dir = tmp_dir / "extracted"
            shutil.unpack_archive(str(downloaded_file_path), str(extract_dir))
            shutil.move(extract_dir / "docker" / "docker", docker_binary_path)
        elif extension == "exe":
            # The windows build is already a bare executable.
            shutil.move(downloaded_file_path, docker_binary_path)

    warnings.warn(
        f"The docker client binary file {DOCKER_VERSION} was downloaded and put "
        f"in `{docker_binary_path.absolute()}`. \n"
        f"You can feel free to remove it if you wish, Python on whales will download "
        f"it again if needed."
    )
def download_from_url(url, dst):
    """Download `url` to path `dst`, re-raising any failure as ConnectionError."""
    try:
        _download_from_url(url, dst)
    except Exception as e:
        # Chain the original exception so the root cause stays visible.
        raise ConnectionError(f"Error while downloading {url}") from e
def _download_from_url(url, dst):
    """Stream `url` into file `dst`, showing a tqdm progress bar.

    Raises:
        requests.HTTPError: if the server answers with an error status.
        ConnectionError: if fewer bytes than advertised were received.
    """
    # Streaming, so we can iterate over the response.
    response = requests.get(url, stream=True)
    # Fix: without this, an HTTP error page (404/500) was silently saved as the file.
    response.raise_for_status()
    total_size_in_bytes = int(response.headers.get("content-length", 0))
    block_size = 1024
    progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
    with open(dst, "wb") as file:
        for data in response.iter_content(block_size):
            progress_bar.update(len(data))
            file.write(data)
    progress_bar.close()
    # A short read means the transfer was truncated; content-length 0 means unknown size.
    if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
        raise ConnectionError(
            f"Total size should be {total_size_in_bytes}, downloaded {progress_bar.n}"
        )
def get_user_os():
    """Map platform.system() to the OS token used in docker download URLs."""
    system_to_token = {"Linux": "linux", "Darwin": "mac", "Windows": "windows"}
    user_os = platform.system()
    token = system_to_token.get(user_os)
    if token is not None:
        return token
    raise NotImplementedError(
        f"Unknown OS: {user_os}, cannot determine which Docker CLI binary file to "
        f"download. \n"
        f"Please open an issue at \n"
        f"https://github.com/gabrieldemarmiesse/python-on-whales/issues \n"
        f"and in the meantime, install Docker manually to make python-on-whales "
        f"work."
    )
def get_arch_for_docker_cli_url():
    """Map platform.architecture() to the arch token in docker download URLs.

    NOTE(review): this keys on pointer width ("64bit"), not the ISA, so any
    64-bit machine maps to "x86_64"; the placeholder keys below were left in
    so unreported platforms can be added easily.
    """
    # I don't know the exact list of possible architectures,
    # so if a user reports a NotImplementedError, we can easily add
    # his/her platform here.
    arch_mapping = {
        "NotImplementedError": "aarch64",
        "NotImplementedError2": "armel",
        "NotImplementedError3": "armhf",
        "NotImplementedError4": "ppc64le",
        "NotImplementedError5": "s390x",
        "64bit": "x86_64",
    }
    arch = platform.architecture()[0]
    if arch in arch_mapping:
        return arch_mapping[arch]
    raise NotImplementedError(
        f"The architecture detected on your system is `{arch}`, the list of "
        f"available architectures is {list(arch_mapping.values())}. \n"
        f"Please open an issue at \n"
        f"https://github.com/gabrieldemarmiesse/python-on-whales/issues "
        f"and make sure to copy past this error message. \n"
        f"In the meantime, install Docker manually on your system."
    )
1734688 | <reponame>koyoo-maxwel/sudoPay
from django import forms
from .models import Account , Profile
from django.contrib.auth.models import User
class UserUpdateForm (forms.ModelForm):
    """ModelForm for editing the built-in User's account details."""
    class Meta:
        model = User
        fields = ['username','email']  # only these two fields are user-editable here
class ProfileUpdateForm (forms.ModelForm):
    """ModelForm for editing a Profile; qr_id and user are set by the app, not the user."""
    class Meta:
        model = Profile
        exclude =['qr_id','user']
class AccountForm (forms.ModelForm ):
    """ModelForm for creating/editing an Account; owner is assigned in the view."""
    class Meta:
        model = Account
        exclude = [ 'owner' ]
1670280 | # -*- coding: utf-8 -*-
__title__ = 'latinpigsay'
__license__ = 'MIT'
__author__ = '<NAME>'
__author_email__ = '<EMAIL>'
__created_on__ = '12/3/2014'
"""
Created on Wed Dec 3 17:36:17 2014
@author: steven_c
"""
acidtest = """Can you talk piglatin to piglatin.
"""
quotes = """A Tale of Two Cities LITE(tm)
-- by <NAME>
A lawyer who looks like a French Nobleman is executed in his place.
The Metamorphosis LITE(tm)
-- by <NAME>
A man turns into a bug and his family gets annoyed.
Lord of the Rings LITE(tm)
-- by <NAME>
Some guys take a long vacation to throw a ring into a volcano.
Hamlet LITE(tm)
-- by <NAME>
A college student on vacation with family problems, a screwy
girl-friend and a mother who won't act her age.
"""
paragraphs = """For many people (myself among them), the Python language is easy to fall in love with.
Since its first appearance in 1991, Python has become one of the most popular dynamic,
programming languages, along with Perl, Ruby, and others. Python and Ruby have
become especially popular in recent years for building websites using their numerous
web frameworks, like Rails (Ruby) and Django (Python). Such languages are often
called scripting languages as they can be used to write quick-and-dirty small programs,
or scripts. I don’t like the term “scripting language” as it carries a connotation that they
cannot be used for building mission-critical software. Among interpreted languages
Python is distinguished by its large and active scientific computing community.
Adoption of Python for scientific computing in both industry applications and academic
research has increased significantly since the early 2000s.
For data analysis and interactive, exploratory computing and data visualization, Python
will inevitably draw comparisons with the many other domain-specific open source
and commercial programming languages and tools in wide use, such as R, MATLAB,
SAS, Stata, and others. In recent years, Python’s improved library support (primarily
pandas) has made it a strong alternative for data manipulation tasks. Combined with
Python’s strength in general purpose programming, it is an excellent choice as a single
language for building data-centric applications.
"""
simplepgs = """Simple test.
Paragraphs. test.
Derp, derp a.
Simple test.
Let's sentence ma'am let's full of ain't contractions I'm i'm couldn't've I'd.
Fred's stapler.
Fred's going to the movie.
O'clock o'clock.
Paragraphs. test.
Derp, derp.
"""
contsentence = "Let's sentence ma'am let's full of ain't contractions I'm i'm couldn't've I'd."
sentence = 'If capture groups are used, then the matched text is also included in the result.'
listofwords = ['Pig Latin',
'hello',
'switch',
'glove',
'fruit smoothie',
'egg',
'ultimate',
'I',
'yellow',
'my',
'rhythm',
'436',
'5',
]
txt = """
The Gettysburg Address
Four score and seven years ago our fathers brought forth on this continent,
a new nation, conceived in Liberty, and dedicated to the proposition that all
men are created equal.
Now we are engaged in a great civil war, testing whether that nation, or any
nation so conceived and so dedicated, can long endure. We are met on a great
battlefield of that war. We have come to dedicate a portion of that field, as
a final resting place for those who here gave their lives that that nation
might live. It is altogether fitting and proper that we should do this.
But, in a larger sense, we cannot dedicate - we cannot consecrate - we cannot
hallow - this ground. The brave men, living and dead, who struggled here, have
consecrated it, far above our poor power to add or detract. The world will
little note, nor long remember what we say here, but it can never forget what
they did here. It is for us the living, rather, to be dedicated here to the
unfinished work which they who fought here have thus far so nobly advanced.
It is rather for us to be here dedicated to the great task remaining before
us - that from these honored dead we take increased devotion to that cause for
which they gave the last full measure of devotion - that we here highly resolve
that these dead shall not have died in vain - that this nation, under God,
shall have a new birth of freedom - and that government of the people, by
the people, for the people, shall not perish from the earth.
"""
paragraphs_og = """For many people (myself among them), the Python language is easy to fall in love with.
Since its first appearance in 1991, Python has become one of the most popular dynamic,
programming languages, along with Perl, Ruby, and others. Python and Ruby have
become especially popular in recent years for building websites using their numerous
web frameworks, like Rails (Ruby) and Django (Python). Such languages are often
called scripting languages as they can be used to write quick-and-dirty small programs,
or scripts. I don’t like the term “scripting language” as it carries a connotation that they
cannot be used for building mission-critical software. Among interpreted languages
Python is distinguished by its large and active scientific computing community. Adop-
tion of Python for scientific computing in both industry applications and academic
research has increased significantly since the early 2000s.
For data analysis and interactive, exploratory computing and data visualization, Python
will inevitably draw comparisons with the many other domain-specific open source
and commercial programming languages and tools in wide use, such as R, MATLAB,
SAS, Stata, and others. In recent years, Python’s improved library support (primarily
pandas) has made it a strong alternative for data manipulation tasks. Combined with
Python’s strength in general purpose programming, it is an excellent choice as a single
language for building data-centric applications.
"""
| StarcoderdataPython |
1654472 | <filename>python/custom_transformer/setup.py
# Copyright 2021 The KServe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
# Packages needed only when running the test suite (installed via ".[test]").
tests_require = [
    'pytest',
    'pytest-tornasync',
    'mypy'
]

# Runtime dependencies are maintained in requirements.txt, one per line.
with open('requirements.txt') as f:
    REQUIRES = f.readlines()

setup(
    name='grpc_image_transformer',
    version='0.1.0',
    author_email='<EMAIL>',
    url='https://github.com/kserve/kserve/python/custom_transformer',
    description='gRPCImageTransformer',
    python_requires='>=3.7',
    install_requires=REQUIRES,
    tests_require=tests_require,
    extras_require={'test': tests_require}
)
| StarcoderdataPython |
19406 | <reponame>hanyas/pyro_examples
import torch
from torch.distributions import Gamma
import torch.nn.functional as F
import matplotlib.pyplot as plt
from tqdm import tqdm
from pyro.distributions import *
import pyro
from pyro.optim import Adam
from pyro.infer import SVI, Trace_ELBO, Predictive
assert pyro.__version__.startswith('1')
pyro.enable_validation(True)
pyro.set_rng_seed(1337)
torch.set_num_threads(1)
# device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
device = torch.device("cpu")
data = torch.cat((MultivariateNormal(-2 * torch.ones(2), 0.1 * torch.eye(2)).sample([25]),
MultivariateNormal(2 * torch.ones(2), 0.1 * torch.eye(2)).sample([25]),
MultivariateNormal(torch.tensor([0., 0.]), 0.1 * torch.eye(2)).sample([25])))
data = data.to(device)
N = data.shape[0]
D = data.shape[1]
def mix_weights(beta):
    """Stick-breaking: turn T-1 Beta draws into T mixture weights summing to 1."""
    leftover = (1 - beta).cumprod(-1)         # stick length remaining after each break
    sticks = F.pad(beta, (0, 1), value=1)     # last component takes all that remains
    carried = F.pad(leftover, (1, 0), value=1)  # nothing broken off before the first
    return sticks * carried
def model(data, **kwargs):
    """Pyro model: truncated (T components) Dirichlet-process Gaussian mixture.

    Stick-breaking betas give the mixture weights; each component's covariance
    Cholesky is diag(1/sqrt(prec)) @ corr_chol, with corr_chol drawn from an
    LKJ prior. Reads module globals: T, D, N, alpha, device.
    """
    with pyro.plate("beta_plate", T - 1):
        beta = pyro.sample("beta", Beta(1, alpha))

    zeta = 2. * torch.ones(T * D, device=device)
    delta = 2. * torch.ones(T * D, device=device)
    with pyro.plate("prec_plate", T * D):
        prec = pyro.sample("prec", Gamma(zeta, delta))

    # Fix: corr_chol was assigned into without ever being allocated, which
    # raised NameError at the first model evaluation (the guide already
    # allocated its q_corr_chol this way).
    corr_chol = torch.zeros(T, D, D, device=device)
    for t in pyro.plate("corr_chol_plate", T):
        corr_chol[t, ...] = pyro.sample("corr_chol_{}".format(t), LKJCorrCholesky(d=D, eta=torch.ones(1, device=device)))

    with pyro.plate("mu_plate", T):
        _std = torch.sqrt(1. / prec.view(-1, D))
        sigma_chol = torch.bmm(torch.diag_embed(_std), corr_chol)
        mu = pyro.sample("mu", MultivariateNormal(torch.zeros(T, D, device=device), scale_tril=sigma_chol))

    with pyro.plate("data", N):
        z = pyro.sample("z", Categorical(mix_weights(beta)))
        pyro.sample("obs", MultivariateNormal(mu[z], scale_tril=sigma_chol[z]), obs=data)
def guide(data, **kwargs):
    """Pyro guide (variational posterior) mirroring `model`.

    Learnable params: gamma (Beta concentrations), zeta/delta (Gamma rate and
    shape), psi (per-component LKJ concentration), tau (component means),
    pi (per-point assignment probabilities). Reads globals: T, D, N, alpha, device.
    """
    gamma = pyro.param('gamma', alpha * torch.ones(T - 1, device=device), constraint=constraints.positive)
    zeta = pyro.param('zeta', lambda: Uniform(1., 2.).sample([T * D]).to(device), constraint=constraints.positive)
    delta = pyro.param('delta', lambda: Uniform(1., 2.).sample([T * D]).to(device), constraint=constraints.positive)
    psi = pyro.param('psi', lambda: Uniform(1., 2.).sample([T]).to(device), constraint=constraints.positive)
    tau = pyro.param('tau', lambda: MultivariateNormal(torch.zeros(D), 10 * torch.eye(2)).sample([T]).to(device))
    pi = pyro.param('pi', torch.ones(N, T, device=device) / T, constraint=constraints.simplex)

    with pyro.plate("beta_plate", T - 1):
        q_beta = pyro.sample("beta", Beta(torch.ones(T - 1, device=device), gamma))
    with pyro.plate("prec_plate", T * D):
        q_prec = pyro.sample("prec", Gamma(zeta, delta))
    # One LKJ correlation-Cholesky sample per component, gathered into a batch.
    q_corr_chol = torch.zeros(T, D, D, device=device)
    for t in pyro.plate("corr_chol_plate", T):
        q_corr_chol[t, ...] = pyro.sample("corr_chol_{}".format(t), LKJCorrCholesky(d=D, eta=psi[t]))
    with pyro.plate("mu_plate", T):
        # Covariance Cholesky = diag(1/sqrt(prec)) @ corr_chol, as in the model.
        _q_std = torch.sqrt(1. / q_prec.view(-1, D))
        q_sigma_chol = torch.bmm(torch.diag_embed(_q_std), q_corr_chol)
        q_mu = pyro.sample("mu", MultivariateNormal(tau, scale_tril=q_sigma_chol))
    with pyro.plate("data", N):
        z = pyro.sample("z", Categorical(pi))
T = 5
optim = Adam({"lr": 0.01})
svi = SVI(model, guide, optim, loss=Trace_ELBO(num_particles=35))
def train(num_iterations):
    """Run SVI for `num_iterations` steps, animating the fit every 100 steps.

    Returns the list of per-step ELBO losses. Reads globals: svi, guide, data.
    """
    losses = []
    pyro.clear_param_store()
    fig = plt.figure(figsize=(5, 5))
    for j in tqdm(range(num_iterations)):
        loss = svi.step(data)
        losses.append(loss)
        if (j % 100) == 0:
            # Periodically visualize the surviving posterior components.
            centers, covars = marginal(guide, num_samples=250)
            animate(fig.gca(), centers, covars)
            plt.draw()
            plt.axis('equal')
            plt.pause(0.001)
            plt.clf()
    return losses
def truncate(alpha, centers, perc, corrs, weights):
    """Drop mixture components whose weight is below alpha**-1 / 100.

    Args:
        alpha: DP concentration (tensor or float).
        centers: [T, D] component means.
        perc: [T*D] precisions, reshaped to [T, D] (reads module globals T, D).
        corrs: [T, D, D] correlation Cholesky factors.
        weights: [T] mixture weights.

    Returns:
        (kept centers, kept full covariance matrices, renormalized weights).
    """
    threshold = alpha**-1 / 100.
    keep = weights > threshold
    true_centers = centers[keep]
    prec = perc.view(T, D)
    true_prec = prec[keep]
    true_corrs = corrs[keep, ...]
    _stds = torch.sqrt(1. / true_prec.view(-1, D))
    _sigma_chols = torch.bmm(torch.diag_embed(_stds), true_corrs)
    # Improvement: build Sigma = L @ L.T for all kept components in one batched
    # op instead of the original per-component torch.mm loop.
    true_sigmas = torch.bmm(_sigma_chols, _sigma_chols.transpose(-2, -1))
    true_weights = weights[keep] / torch.sum(weights[keep])
    return true_centers, true_sigmas, true_weights
def marginal(guide, num_samples=25):
    """Posterior-mean summary of the guide, truncated to significant components.

    Draws `num_samples` from the guide, averages each latent across samples,
    converts the mean stick-breaking betas to weights, and returns the
    (centers, covariances) of the components surviving `truncate`.
    Reads globals: data, T, D, alpha.
    """
    posterior_predictive = Predictive(guide, num_samples=num_samples)
    posterior_samples = posterior_predictive.forward(data)
    mu_mean = posterior_samples['mu'].detach().mean(dim=0)
    prec_mean = posterior_samples['prec'].detach().mean(dim=0)
    # Correlation Cholesky factors were sampled per component under separate names.
    corr_mean = torch.zeros(T, D, D)
    for t in range(T):
        corr_mean[t, ...] = posterior_samples['corr_chol_{}'.format(t)].detach().mean(dim=0)
    beta_mean = posterior_samples['beta'].detach().mean(dim=0)
    weights_mean = mix_weights(beta_mean)
    centers, sigmas, _ = truncate(alpha, mu_mean, prec_mean, corr_mean, weights_mean)
    return centers, sigmas
def animate(axes, centers, covars):
    """Scatter the data and draw a 1-sigma ellipse per mixture component.

    NOTE(review): the data scatter goes through plt (current axes) while the
    component plots use `axes`; identical here because train() passes fig.gca().
    """
    plt.scatter(data[:, 0], data[:, 1], color="blue", marker="+")
    from math import pi
    t = torch.arange(0, 2 * pi, 0.01)
    circle = torch.stack([torch.sin(t), torch.cos(t)], dim=0)
    axes.scatter(centers[:, 0], centers[:, 1], color="red")
    for n in range(len(covars)):
        # Ellipse = Cholesky(Sigma) applied to the unit circle, shifted to the mean.
        ellipse = torch.mm(torch.cholesky(covars[n, ...]), circle)
        axes.plot(ellipse[0, :] + centers[n, 0], ellipse[1, :] + centers[n, 1],
                  linestyle='-', linewidth=2, color='g', alpha=1.)
alpha = 0.1 * torch.ones(1, device=device)
elbo = train(5000)
plt.figure()
plt.plot(elbo)
| StarcoderdataPython |
69657 | <reponame>PKUfudawei/cmssw
import FWCore.ParameterSet.Config as cms
process = cms.Process("TrackerMapProd")

# Route cabling-reader messages (INFO and above) to a dedicated log file
# and silence cerr output.
process.MessageLogger = cms.Service("MessageLogger",
    cablingReader = cms.untracked.PSet(
        threshold = cms.untracked.string('INFO')
    ),
    cerr = cms.untracked.PSet(
        enable = cms.untracked.bool(False)
    ),
    debugModules = cms.untracked.vstring(''),
    files = cms.untracked.PSet(
        cablingMap = cms.untracked.PSet(
        )
    )
)
import CalibTracker.Configuration.Common.PoolDBESSource_cfi
# Read the fake FED cabling tag from a local sqlite conditions file.
process.siStripCond = CalibTracker.Configuration.Common.PoolDBESSource_cfi.poolDBESSource.clone()
process.siStripCond.toGet = cms.VPSet(cms.PSet(
    record = cms.string('SiStripFedCablingRcd'),
    tag = cms.string('SiStripFedCabling_Fake_30X')
))
process.siStripCond.connect = 'sqlite_file:SiStripConditionsDBFile.db'
process.siStripCond.DBParameters.authenticationPath = '/afs/cern.ch/cms/DB/conddb'
process.sistripconn = cms.ESProducer("SiStripConnectivity")
process.load("DQMServices.Core.DQM_cfg")
# Analyzer that renders the FED cabling onto a tracker map.
process.siStripCablingTrackerMap = cms.EDAnalyzer("SiStripCablingTrackerMap")
# Single empty "event" pinned to run 50908 so the matching IOV is loaded.
process.source = cms.Source("EmptyIOVSource",
    firstValue = cms.uint64(50908),
    lastValue = cms.uint64(50908),
    timetype = cms.string('runnumber'),
    interval = cms.uint64(1)
)
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)
process.p = cms.Path(process.siStripCablingTrackerMap)
| StarcoderdataPython |
3236614 | <gh_stars>0
from tkinter import *
from PIL import ImageTk, Image

# --- Simple tkinter image viewer setup --------------------------------------
root = Tk()
root.title('Image Viewer')
root.iconbitmap('im_logo.ico')

# BUGFIX: `Widget(...).grid(...)` returns None, so `frame` (and `b`) used to
# be None and every child widget silently fell back to `root` as its master.
# Create the widget first, then grid it, so the references are real widgets.
frame = LabelFrame(root, text="I am here", padx=5, pady=5)
frame.grid(row=0, column=0)
b = Button(frame, text="This will make you exit", command=root.quit)
b.grid(row=0, column=1)

# Load the 25 frames in a loop instead of 25 copy-pasted assignments.
# Keeping them in a list also keeps a live reference to every PhotoImage,
# which tkinter requires (otherwise the images are garbage-collected).
image_list = [ImageTk.PhotoImage(Image.open("im1 ({}).png".format(i)))
              for i in range(1, 26)]

# Status bar at the bottom showing the current position in the list.
status = Label(root, text=f"Image 1 of{len(image_list)}", bd=1, relief=SUNKEN, anchor=E)
status.grid(row=3, column=0, columnspan=3, sticky=W + E)

# Start by displaying the first image.
my_label = Label(image=image_list[0])
my_label.grid(row=1, column=0, columnspan=3)
def forward(image_number):
    """Display image ``image_number`` (1-based) and rebind the nav buttons.

    BUGFIX: the original code advanced past the end of ``image_list``
    (IndexError on the click after the last image) and only patched the
    status text afterwards.  Wrap the index *before* it is used so the
    viewer cycles cleanly through 1..len(image_list).
    """
    global my_label
    global button_forward
    global button_back

    # Wrap around once we step past the end of the list.
    if image_number > len(image_list):
        image_number = 1

    my_label.grid_forget()
    my_label = Label(image=image_list[image_number - 1])
    button_forward = Button(frame, text=">>", command=lambda: forward(image_number + 1))
    button_back = Button(frame, text="<<", command=lambda: back(image_number - 1))

    my_label.grid(row=1, column=0, columnspan=3)
    button_back.grid(row=2, column=0)
    button_forward.grid(row=2, column=2)

    # Refresh the status bar with the (wrapped) current position.
    status = Label(frame, text=f"Image {str(image_number)} of{len(image_list)}",
                   bd=1, relief=SUNKEN, anchor=E).grid(row=3, column=0,
                                                       columnspan=3,
                                                       sticky=W + E)
def back(image_number):
    """Display image ``image_number`` (1-based) and rebind the nav buttons.

    BUGFIX: the original only corrected ``image_number`` *after* using it,
    relying on Python's negative indexing to accidentally show the right
    image while the status bar went out of sync.  Wrap the index before
    it is used so stepping back from image 1 lands on the last image.
    """
    global my_label
    global button_forward
    global button_back

    # Wrap around once we step before the start of the list.
    if image_number < 1:
        image_number = len(image_list)

    my_label.grid_forget()
    my_label = Label(image=image_list[image_number - 1])
    button_forward = Button(frame, text=">>", command=lambda: forward(image_number + 1))
    button_back = Button(frame, text="<<", command=lambda: back(image_number - 1))

    my_label.grid(row=1, column=0, columnspan=3)
    button_back.grid(row=2, column=0)
    button_forward.grid(row=2, column=2)

    # Refresh the status bar with the (wrapped) current position.
    status = Label(frame, text=f"Image {str(image_number)} of{len(image_list)}",
                   bd=1, relief=SUNKEN, anchor=E).grid(row=3, column=0,
                                                       columnspan=3,
                                                       sticky=W + E)
# Initial navigation controls.  We start on image 1, so "back" would wrap
# to the last image.  BUGFIX: the original bound `command=back`, which would
# call back() with no argument (TypeError) if the button were ever enabled.
button_back = Button(frame, text="<<", command=lambda: back(0), state=DISABLED)
button_exit = Button(frame, text="Exit Program", command=root.quit)
button_forward = Button(frame, text=">>", command=lambda: forward(2))

button_back.grid(row=2, column=0)
button_exit.grid(row=2, column=1, pady=10)
button_forward.grid(row=2, column=2)

root.mainloop()
| StarcoderdataPython |
1699600 | import pyparsing
from pyparsing import Word, WordStart, WordEnd, ZeroOrMore, Optional
class reference_patterns:
    """Pre-built pyparsing grammars for locating reference markers in text.

    After construction the instance exposes four patterns:

    * ``single_number``           -- word followed by a bare number (e.g. "word12").
    * ``single_number_parens``    -- number(s) wrapped in brackets (e.g. "wd(12)").
    * ``number_then_punctuation`` -- number followed by punctuation (e.g. "wd12,").
    * ``punctuation_then_number`` -- punctuation followed by number (e.g. "wd,12").
    """

    def __init__(self):
        # Basic building blocks.
        real_word_dashes = Word(pyparsing.alphas + "-")
        punctuation = Word(".!?:,;-")
        punctuation_no_dash = Word(".!?:,;")
        punctuation_reference_letter = Word(".:,;-")

        printable = Word(pyparsing.printables, exact=1)
        letter = Word(pyparsing.alphas, exact=1)
        letter_reference = punctuation_reference_letter + letter

        # A reference number: digits, optionally a letter suffix, optionally
        # further punctuation+letter suffixes (e.g. "12a", "12a,b").
        nums = (
            Word(pyparsing.nums)
            + Optional(letter)
            + ZeroOrMore(letter_reference)
        )

        # Optional closing brackets / punctuation at a word boundary.
        word_end = (
            pyparsing.ZeroOrMore(Word(")") | Word("}") | Word("]"))
            + Optional(punctuation_no_dash)
            + WordEnd()
        )

        # A word immediately followed by a reference number.
        self.single_number = WordStart() + real_word_dashes + nums + word_end

        # One or more bracketed reference numbers after a word.
        self.single_number_parens = (
            printable
            + letter
            + Optional(punctuation_no_dash)
            + pyparsing.OneOrMore(
                Word("([{", exact=1)
                + pyparsing.OneOrMore(nums | Word("-"))
                + Word(")]}", exact=1)
            )
            + Optional(punctuation_no_dash)
            + word_end
        )

        # Number directly followed by punctuation (e.g. "text12,13.").
        self.number_then_punctuation = (
            printable
            + letter
            + nums
            + punctuation
            + pyparsing.ZeroOrMore(nums | punctuation)
            + word_end
        )

        # Punctuation directly followed by a number (e.g. "text,12").
        self.punctuation_then_number = (
            printable
            + letter
            + punctuation_no_dash
            + nums
            + pyparsing.ZeroOrMore(punctuation | nums)
            + word_end
        )
| StarcoderdataPython |
183222 | <filename>src/django_clickhouse/routers.py
"""
This file defines router to find appropriate database
"""
from typing import Type
import random
import six
from infi.clickhouse_orm.migrations import Operation, DropTable, CreateTable
from .clickhouse_models import ClickHouseModel
from .configuration import config
from .utils import lazy_class_import
class DefaultRouter:
    """Default database router for ClickHouse models.

    Picks a random alias from a model's configured read/write alias lists
    (simple load spreading) and decides which databases a migration is
    allowed to run on.
    """

    def db_for_read(self, model: Type[ClickHouseModel], **hints) -> str:
        """
        Gets database to read from for model
        :param model: Model to decide for
        :param hints: Some hints to make correct choice
        :return: Database alias
        """
        # Spread reads randomly across all configured read aliases.
        return random.choice(model.read_db_aliases)

    def db_for_write(self, model: Type[ClickHouseModel], **hints) -> str:
        """
        Gets database to write to for model
        :param model: Model to decide for
        :param hints: Some hints to make correct choice
        :return: Database alias
        """
        # Spread writes randomly across all configured write aliases.
        return random.choice(model.write_db_aliases)

    def allow_migrate(self, db_alias: str, app_label: str, operation: Operation,
                      model=None, **hints) -> bool:
        """
        Checks if migration can be applied to given database
        :param db_alias: Database alias to check
        :param app_label: App from which migration is got
        :param operation: Operation object to perform
        :param model: Model migration is applied to
        :param hints: Hints to make correct decision
        :return: boolean
        """
        # Explicit override: migrate exactly on the databases listed in hints.
        if hints.get("force_migrate_on_databases", None):
            return db_alias in hints["force_migrate_on_databases"]

        if hints.get('model'):
            # The hint may name the model as a string; build the dotted path
            # "<app>.<models_module>.<Model>" and import it lazily.
            model = '%s.%s.%s' % (app_label, config.MODELS_MODULE, hints['model']) \
                if isinstance(hints['model'], six.string_types) else hints['model']
            model = lazy_class_import(model)

        if operation.__class__ not in {CreateTable, DropTable}:
            # Non-DDL operations go to the model's replicated aliases.
            return db_alias in model.migrate_replicated_db_aliases
        else:
            # Table creation/removal goes to the non-replicated aliases.
            return db_alias in model.migrate_non_replicated_db_aliases
| StarcoderdataPython |
105813 | import collections.abc
from functools import partial
from urllib.parse import urlencode
from geopy.exc import ConfigurationError, GeocoderQueryError
from geopy.geocoders.base import _DEFAULT_USER_AGENT, DEFAULT_SENTINEL, Geocoder
from geopy.location import Location
from geopy.util import logger
__all__ = ("Nominatim", )
_DEFAULT_NOMINATIM_DOMAIN = 'nominatim.openstreetmap.org'
_REJECTED_USER_AGENTS = (
# Various sample user-agent strings mentioned in docs:
"my-application",
"my_app/1",
"my_user_agent/1.0",
"specify_your_app_name_here",
_DEFAULT_USER_AGENT,
)
class Nominatim(Geocoder):
    """Nominatim geocoder for OpenStreetMap data.

    Documentation at:
        https://nominatim.org/release-docs/develop/api/Overview/

    .. attention::
        Using Nominatim with the default `user_agent` is strongly discouraged,
        as it violates Nominatim's Usage Policy
        https://operations.osmfoundation.org/policies/nominatim/
        and may possibly cause 403 and 429 HTTP errors. Please make sure
        to specify a custom `user_agent` with
        ``Nominatim(user_agent="my-application")`` or by
        overriding the default `user_agent`:
        ``geopy.geocoders.options.default_user_agent = "my-application"``.

        An exception will be thrown if a custom `user_agent` is not specified.
    """

    # Keys honoured when a structured (dict) query is passed to geocode().
    structured_query_params = {
        'street',
        'city',
        'county',
        'state',
        'country',
        'postalcode',
    }

    # URL paths appended to the service domain for the two endpoints.
    geocode_path = '/search'
    reverse_path = '/reverse'
    def __init__(
            self,
            *,
            timeout=DEFAULT_SENTINEL,
            proxies=DEFAULT_SENTINEL,
            domain=_DEFAULT_NOMINATIM_DOMAIN,
            scheme=None,
            user_agent=None,
            ssl_context=DEFAULT_SENTINEL,
            adapter_factory=None
            # Make sure to synchronize the changes of this signature in the
            # inheriting classes (e.g. PickPoint).
    ):
        """
        :param int timeout:
            See :attr:`geopy.geocoders.options.default_timeout`.

        :param dict proxies:
            See :attr:`geopy.geocoders.options.default_proxies`.

        :param str domain: Domain where the target Nominatim service
            is hosted.

        :param str scheme:
            See :attr:`geopy.geocoders.options.default_scheme`.

        :param str user_agent:
            See :attr:`geopy.geocoders.options.default_user_agent`.

        :type ssl_context: :class:`ssl.SSLContext`
        :param ssl_context:
            See :attr:`geopy.geocoders.options.default_ssl_context`.

        :param callable adapter_factory:
            See :attr:`geopy.geocoders.options.default_adapter_factory`.

            .. versionadded:: 2.0
        """
        super().__init__(
            scheme=scheme,
            timeout=timeout,
            proxies=proxies,
            user_agent=user_agent,
            ssl_context=ssl_context,
            adapter_factory=adapter_factory,
        )

        self.domain = domain.strip('/')

        # Refuse the public OSM instance with a default/sample user agent:
        # that combination violates the Nominatim usage policy.
        if (self.domain == _DEFAULT_NOMINATIM_DOMAIN
                and self.headers['User-Agent'] in _REJECTED_USER_AGENTS):
            raise ConfigurationError(
                'Using Nominatim with default or sample `user_agent` "%s" is '
                'strongly discouraged, as it violates Nominatim\'s ToS '
                'https://operations.osmfoundation.org/policies/nominatim/ '
                'and may possibly cause 403 and 429 HTTP errors. '
                'Please specify a custom `user_agent` with '
                '`Nominatim(user_agent="my-application")` or by '
                'overriding the default `user_agent`: '
                '`geopy.geocoders.options.default_user_agent = "my-application"`.'
                % self.headers['User-Agent']
            )

        # Pre-build the two endpoint URLs.
        self.api = "%s://%s%s" % (self.scheme, self.domain, self.geocode_path)
        self.reverse_api = "%s://%s%s" % (self.scheme, self.domain, self.reverse_path)
def _construct_url(self, base_api, params):
"""
Construct geocoding request url.
The method can be overridden in Nominatim-based geocoders in order
to extend URL parameters.
:param str base_api: Geocoding function base address - self.api
or self.reverse_api.
:param dict params: Geocoding params.
:return: string URL.
"""
return "?".join((base_api, urlencode(params)))
    def geocode(
            self,
            query,
            *,
            exactly_one=True,
            timeout=DEFAULT_SENTINEL,
            limit=None,
            addressdetails=False,
            language=False,
            geometry=None,
            extratags=False,
            country_codes=None,
            viewbox=None,
            bounded=False,
            featuretype=None,
            namedetails=False
    ):
        """
        Return a location point by address.

        :param query: The address, query or a structured query
            you wish to geocode.

            For a structured query, provide a dictionary whose keys
            are one of: `street`, `city`, `county`, `state`, `country`, or
            `postalcode`. For more information, see Nominatim's
            documentation for `structured requests`:

                https://nominatim.org/release-docs/develop/api/Search

        :type query: dict or str

        :param bool exactly_one: Return one result or a list of results, if
            available.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.

        :param int limit: Maximum amount of results to return from Nominatim.
            Unless exactly_one is set to False, limit will always be 1.

        :param bool addressdetails: If you want in *Location.raw* to include
            addressdetails such as city_district, etc set it to True

        :param str language: Preferred language in which to return results.
            Either uses standard
            `RFC2616 <http://www.ietf.org/rfc/rfc2616.txt>`_
            accept-language string or a simple comma-separated
            list of language codes.

        :param str geometry: If present, specifies whether the geocoding
            service should return the result's geometry in `wkt`, `svg`,
            `kml`, or `geojson` formats. This is available via the
            `raw` attribute on the returned :class:`geopy.location.Location`
            object.

        :param bool extratags: Include additional information in the result if available,
            e.g. wikipedia link, opening hours.

        :param country_codes: Limit search results
            to a specific country (or a list of countries).
            A country_code should be the ISO 3166-1alpha2 code,
            e.g. ``gb`` for the United Kingdom, ``de`` for Germany, etc.

        :type country_codes: str or list

        :type viewbox: list or tuple of 2 items of :class:`geopy.point.Point` or
            ``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.

        :param viewbox: Coordinates to restrict search within.
            Example: ``[Point(22, 180), Point(-22, -180)]``.

        :param bool bounded: Restrict the results to only items contained
            within the bounding view_box.

        :param str featuretype: If present, restrict results to certain type of features.
            Allowed values: `country`, `state`, `city`, `settlement`.

        :param bool namedetails: If you want in *Location.raw* to include
            namedetails, set it to True. This will be a list of alternative names,
            including language variants, etc.

        :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
            ``exactly_one=False``.
        """
        # Structured queries keep only the whitelisted keys; free-form
        # queries are passed through as the "q" parameter.
        if isinstance(query, collections.abc.Mapping):
            params = {
                key: val
                for key, val
                in query.items()
                if key in self.structured_query_params
            }
        else:
            params = {'q': query}
        params.update({
            'format': 'json'
        })

        if exactly_one:
            params['limit'] = 1
        elif limit is not None:
            limit = int(limit)
            if limit < 1:
                raise ValueError("Limit cannot be less than 1")
            params['limit'] = limit

        if viewbox:
            params['viewbox'] = self._format_bounding_box(
                viewbox, "%(lon1)s,%(lat1)s,%(lon2)s,%(lat2)s")

        if bounded:
            params['bounded'] = 1

        # Accept a single code or a list; emit a comma-separated string.
        if not country_codes:
            country_codes = []

        if isinstance(country_codes, str):
            country_codes = [country_codes]

        if country_codes:
            params['countrycodes'] = ",".join(country_codes)

        if addressdetails:
            params['addressdetails'] = 1

        if namedetails:
            params['namedetails'] = 1

        if language:
            params['accept-language'] = language

        if extratags:
            params['extratags'] = True

        # Map the requested geometry format onto the matching API flag.
        if geometry is not None:
            geometry = geometry.lower()
            if geometry == 'wkt':
                params['polygon_text'] = 1
            elif geometry == 'svg':
                params['polygon_svg'] = 1
            elif geometry == 'kml':
                params['polygon_kml'] = 1
            elif geometry == 'geojson':
                params['polygon_geojson'] = 1
            else:
                raise GeocoderQueryError(
                    "Invalid geometry format. Must be one of: "
                    "wkt, svg, kml, geojson."
                )

        if featuretype:
            params['featuretype'] = featuretype

        url = self._construct_url(self.api, params)
        logger.debug("%s.geocode: %s", self.__class__.__name__, url)
        callback = partial(self._parse_json, exactly_one=exactly_one)
        return self._call_geocoder(url, callback, timeout=timeout)
    def reverse(
            self,
            query,
            *,
            exactly_one=True,
            timeout=DEFAULT_SENTINEL,
            language=False,
            addressdetails=True,
            zoom=None
    ):
        """
        Return an address by location point.

        :param query: The coordinates for which you wish to obtain the
            closest human-readable addresses.

        :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
            longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.

        :param bool exactly_one: Return one result or a list of results, if
            available.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.

        :param str language: Preferred language in which to return results.
            Either uses standard
            `RFC2616 <http://www.ietf.org/rfc/rfc2616.txt>`_
            accept-language string or a simple comma-separated
            list of language codes.

        :param bool addressdetails: Whether or not to include address details,
            such as city, county, state, etc. in *Location.raw*

        :param int zoom: Level of detail required for the address,
            an integer in range from 0 (country level) to 18 (building level),
            default is 18.

        :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
            ``exactly_one=False``.
        """
        # Normalise the query to "lat,lon"; anything that does not split
        # into exactly two parts is rejected.
        try:
            lat, lon = self._coerce_point_to_string(query).split(',')
        except ValueError:
            raise ValueError("Must be a coordinate pair or Point")
        params = {
            'lat': lat,
            'lon': lon,
            'format': 'json',
        }
        if language:
            params['accept-language'] = language

        params['addressdetails'] = 1 if addressdetails else 0

        if zoom is not None:
            params['zoom'] = zoom

        url = self._construct_url(self.reverse_api, params)
        logger.debug("%s.reverse: %s", self.__class__.__name__, url)
        callback = partial(self._parse_json, exactly_one=exactly_one)
        return self._call_geocoder(url, callback, timeout=timeout)
def _parse_code(self, place):
# Parse each resource.
latitude = place.get('lat', None)
longitude = place.get('lon', None)
placename = place.get('display_name', None)
if latitude is not None and longitude is not None:
latitude = float(latitude)
longitude = float(longitude)
return Location(placename, (latitude, longitude), place)
def _parse_json(self, places, exactly_one):
if not places:
return None
if not isinstance(places, collections.abc.Sequence):
places = [places]
if exactly_one:
return self._parse_code(places[0])
else:
return [self._parse_code(place) for place in places]
| StarcoderdataPython |
# Read a sentence and report on occurrences of the letter "A":
# total count, first position and last position (1-based).
frase = str(input('Digite uma frase qualquer: ')).strip().upper()
# How many times the letter "A" appears (input was upper-cased above).
print(f'A letra "A" apareceu', frase.count('A'), 'vezes')
# Position of its first occurrence (1-based; shows 0 if it never appears).
print(f'A letra "A" apareceu na posição', frase.find('A')+1)
# Position of its last occurrence (1-based).
print(f'A letra "A" apareceu pela ultima vez na posição', frase.rfind('A')+1)
36369 | <reponame>MilosRasic98/Orbweaver-Rover
#!/usr/bin/env python3
import rospy
import serial
from std_msgs.msg import Float32

# Bridge node: read torque readings from an Arduino over serial and
# republish them on a ROS topic at 10 Hz.
tt_arduino = serial.Serial("/dev/ttyUSB0", 9600)

rospy.init_node('torque_test_stand', anonymous = False)
pub = rospy.Publisher('/test_equipment/measured_torque', Float32, queue_size=10)
r = rospy.Rate(10)

print('Torque Test Stand Node Started!')

while not rospy.is_shutdown():
    # readline() yields bytes; str() renders it as "b'<value>\r\n'", so slice
    # off the leading "b'" and cut at the literal backslash-r that follows
    # the numeric payload.
    raw_data = str(tt_arduino.readline())
    extracted_data = raw_data[2:raw_data.find('\\r')]
    # NOTE(review): float() will raise on a garbled serial line — presumably
    # acceptable for a test stand; confirm whether the node should skip bad
    # readings instead.
    converted_data = float(extracted_data)
    pub.publish(converted_data)
    r.sleep()
3222070 | """
Functions to unpack Simrad EK60 .raw and save to .nc.
Pieces for unpacking power data came from:
https://github.com/oceanobservatories/mi-instrument (authors: <NAME> & <NAME>)
with modifications:
- python 3.6 compatibility
- strip off dependency on other mi-instrument functions
- unpack split-beam angle data
- unpack various additional variables needed for calibration
"""
import re
import os
from collections import defaultdict
from struct import unpack_from, unpack
import numpy as np
from datetime import datetime as dt
from matplotlib.dates import date2num
import pytz
from .set_nc_groups import SetGroups
from echopype._version import get_versions
ECHOPYPE_VERSION = get_versions()['version']
del get_versions
class ConvertEK60(object):
    """Parser for Simrad EK60 ``.raw`` echosounder files (for saving to netCDF)."""
    def __init__(self, _filename=""):
        """Set up unpacking constants, numpy dtypes and result holders.

        :param _filename: path to the EK60 ``.raw`` file to be parsed
            (validated by the ``filename`` property setter, which requires
            a ``.raw`` extension)
        """
        self.filename = _filename  # path to EK60 .raw filename to be parsed

        # Constants for unpacking .raw files
        self.BLOCK_SIZE = 1024*4  # Block size read in from binary file to search for token
        self.LENGTH_SIZE = 4
        self.DATAGRAM_HEADER_SIZE = 12
        self.CONFIG_HEADER_SIZE = 516
        self.CONFIG_TRANSDUCER_SIZE = 320

        # Set global regex expressions to find all sample, annotation and NMEA sentences
        self.SAMPLE_REGEX = b'RAW\d{1}'
        self.SAMPLE_MATCHER = re.compile(self.SAMPLE_REGEX, re.DOTALL)
        self.FILENAME_REGEX = r'(?P<prefix>\S*)-D(?P<date>\d{1,})-T(?P<time>\d{1,})'
        self.FILENAME_MATCHER = re.compile(self.FILENAME_REGEX, re.DOTALL)

        # Reference time "seconds since 1900-01-01 00:00:00"
        self.REF_TIME = date2num(dt(1900, 1, 1, 0, 0, 0))
        self.WINDOWS_EPOCH = dt(1601, 1, 1)
        self.NTP_EPOCH = dt(1900, 1, 1)
        # Offset (seconds) between the Windows FILETIME and NTP epochs.
        self.NTP_WINDOWS_DELTA = (self.NTP_EPOCH - self.WINDOWS_EPOCH).total_seconds()

        # Numpy data type object for unpacking the Sample datagram including the header from binary *.raw
        sample_dtype = np.dtype([('length1', 'i4'),  # 4 byte int (long)
                                 # Datagram header
                                 ('datagram_type', 'a4'),  # 4 byte string
                                 ('low_date_time', 'u4'),  # 4 byte int (long)
                                 ('high_date_time', 'u4'),  # 4 byte int (long)
                                 # Sample datagram
                                 ('channel_number', 'i2'),  # 2 byte int (short)
                                 ('mode', 'i2'),  # 2 byte int (short): whether split-beam or single-beam
                                 ('transducer_depth', 'f4'),  # 4 byte float
                                 ('frequency', 'f4'),  # 4 byte float
                                 ('transmit_power', 'f4'),  # 4 byte float
                                 ('pulse_length', 'f4'),  # 4 byte float
                                 ('bandwidth', 'f4'),  # 4 byte float
                                 ('sample_interval', 'f4'),  # 4 byte float
                                 ('sound_velocity', 'f4'),  # 4 byte float
                                 ('absorption_coefficient', 'f4'),  # 4 byte float
                                 ('heave', 'f4'),  # 4 byte float
                                 ('roll', 'f4'),  # 4 byte float
                                 ('pitch', 'f4'),  # 4 byte float
                                 ('temperature', 'f4'),  # 4 byte float
                                 ('trawl_upper_depth_valid', 'i2'),  # 2 byte int (short)
                                 ('trawl_opening_valid', 'i2'),  # 2 byte int (short)
                                 ('trawl_upper_depth', 'f4'),  # 4 byte float
                                 ('trawl_opening', 'f4'),  # 4 byte float
                                 ('offset', 'i4'),  # 4 byte int (long)
                                 ('count', 'i4')])  # 4 byte int (long): number of items to unpack for power_data
        # All fields are little-endian on disk.
        self.sample_dtype = sample_dtype.newbyteorder('<')

        self.power_dtype = np.dtype([('power_data', '<i2')])  # 2 byte int (short)

        self.angle_dtype = np.dtype([('athwartship', '<i1'), ('alongship', '<i1')])  # 1 byte ints

        # Initialize other params that will be unpacked from data
        self.config_header = None
        self.config_transducer = None
        self.first_ping_metadata = None
        self.data_times = None
        self.motion = None
        self.power_data_dict = None
        self.angle_data_dict = None
        self.tr_data_dict = None
        self.nc_path = None
    @property
    def filename(self):
        """Path to the EK60 ``.raw`` file to be parsed."""
        return self._filename
@filename.setter
def filename(self, p):
pp = os.path.basename(p)
_, ext = os.path.splitext(pp)
if ext != '.raw':
raise ValueError('Please specify a .raw file.')
# print('Data file in manufacturer format, please convert first.')
# print('To convert data, follow the steps below:')
else:
self._filename = p
@staticmethod
def _read_config_header(chunk):
"""Reading EK60 .raw configuration header information from the byte string passed in as a chunk.
This method unpacks info from configuration header into self.config_header
Parameters
----------
chunk : int
data chunk to read the config header from
"""
# setup unpack structure and field names
field_names = ('survey_name', 'transect_name', 'sounder_name',
'version', 'transducer_count')
fmt = '<128s128s128s30s98sl'
# read in the values from the byte string chunk
values = list(unpack(fmt, chunk))
values.pop(4) # drop the spare field
# strip the trailing zero byte padding from the strings
# for i in [0, 1, 2, 3]:
for i in range(4):
values[i] = values[i].strip(b'\x00')
# create the configuration header dictionary
return dict(zip(field_names, values))
    @staticmethod
    def _read_config_transducer(chunk):
        """Reading EK60 .raw configuration transducer information from the byte string passed in as a chunk.

        Unpacks one 320-byte transducer configuration record into a dict.
        The packed record interleaves spare/padding fields with the data,
        which is why the index slices below skip positions 22, 28 and 34.

        Parameters
        ----------
        chunk : bytes
            data chunk to read the configuration transducer information from

        Returns
        -------
        dict keyed by the ``field_names`` below; the three calibration
        tables are numpy arrays of 5 floats each.
        """
        # setup unpack structure and field names
        field_names = ('channel_id', 'beam_type', 'frequency', 'gain',
                       'equiv_beam_angle', 'beam_width_alongship', 'beam_width_athwartship',
                       'angle_sensitivity_alongship', 'angle_sensitivity_athwartship',
                       'angle_offset_alongship', 'angle_offset_athwartship', 'pos_x', 'pos_y',
                       'pos_z', 'dir_x', 'dir_y', 'dir_z', 'pulse_length_table', 'gain_table',
                       'sa_correction_table', 'gpt_software_version')
        # Layout: 128s id, l beam_type, 15f scalars, then three blocks of
        # (5f table + 8s spare), a 16s spare, and a 28s version string.
        fmt = '<128sl15f5f8s5f8s5f8s16s28s'

        # read in the values from the byte string chunk
        values = list(unpack(fmt, chunk))

        # convert some of the values to arrays
        # (indices 17-21, 23-27, 29-33 are the three 5-float tables;
        #  indices 22, 28 and 34 are the spare fields in between)
        pulse_length_table = np.array(values[17:22])
        gain_table = np.array(values[23:28])
        sa_correction_table = np.array(values[29:34])

        # strip the trailing zero byte padding from the strings
        # (values[0] is channel_id, values[35] is gpt_software_version)
        for i in [0, 35]:
            values[i] = values[i].strip(b'\x00')

        # put it back together, dropping the spare strings
        config_transducer = dict(zip(field_names[0:17], values[0:17]))
        config_transducer[field_names[17]] = pulse_length_table
        config_transducer[field_names[18]] = gain_table
        config_transducer[field_names[19]] = sa_correction_table
        config_transducer[field_names[20]] = values[35]

        return config_transducer
    def read_header(self, file_handle):
        """Read header and transducer config from EK60 raw data file.

        This method calls private methods _read_config_header() and
        _read_config_transducer() to populate self.config_header and
        self.config_transducer, and leaves ``file_handle`` positioned just
        past the configuration datagram.

        :param file_handle: file object opened in binary mode at offset 0
        """
        # Read binary file a block at a time
        raw = file_handle.read(self.BLOCK_SIZE)

        # Read the configuration datagram, output at the beginning of the file
        length1, = unpack_from('<l', raw)
        byte_cnt = self.LENGTH_SIZE

        # Configuration datagram header
        byte_cnt += self.DATAGRAM_HEADER_SIZE

        # Configuration: header
        config_header = self._read_config_header(raw[byte_cnt:byte_cnt + self.CONFIG_HEADER_SIZE])
        byte_cnt += self.CONFIG_HEADER_SIZE
        # One transducer record per configured transducer.
        config_transducer = []
        for num_transducer in range(config_header['transducer_count']):
            config_transducer.append(self._read_config_transducer(raw[byte_cnt:byte_cnt + self.CONFIG_TRANSDUCER_SIZE]))
            byte_cnt += self.CONFIG_TRANSDUCER_SIZE

        # Compare length1 (from beginning of datagram) to length2 (from the end of datagram) to
        # the actual number of bytes read. A mismatch can indicate an invalid, corrupt, misaligned,
        # or missing configuration datagram or a reverse byte order binary data file.
        # A bad/missing configuration datagram header is a significant error.
        length2, = unpack_from('<l', raw, byte_cnt)
        if not (length1 == length2 == byte_cnt-self.LENGTH_SIZE):
            print('Possible file corruption or format incompatibility.')
            # raise InstrumentDataException(
            #     "Length of configuration datagram and number of bytes read do not match: length1: %s"
            #     ", length2: %s, byte_cnt: %s. Possible file corruption or format incompatibility." %
            #     (length1, length2, byte_cnt+LENGTH_SIZE))
        byte_cnt += self.LENGTH_SIZE
        # Leave the handle at the first datagram after the configuration.
        file_handle.seek(byte_cnt)

        # Populate class attributes
        self.config_header = config_header
        self.config_transducer = config_transducer
def _windows_to_ntp(self, windows_time):
"""Convert a windows file timestamp into Network Time Protocol.
Parameters
----------
windows_time
100ns since Windows time epoch
Returns
-------
timestamp into Network Time Protocol (NTP).
"""
return windows_time / 1e7 - self.NTP_WINDOWS_DELTA
@staticmethod
def _build_windows_time(high_word, low_word):
"""Generate Windows time value from high and low date times.
Parameters
----------
high_word
high word portion of the Windows datetime
low_word
low word portion of the Windows datetime
Returns
-------
time in 100ns since 1601/01/01 00:00:00 UTC
"""
return (high_word << 32) + low_word
    def process_sample(self, input_file, transducer_count):
        """Processing one sample at a time from input_file.

        Reads one Sample datagram (header, power samples and, for
        split-beam data, angle samples) from the current file position.

        Parameters
        ----------
        input_file
            EK60 raw data file object positioned at the start of a Sample
            datagram (including its leading length field)
        transducer_count : int
            number of transducers (used only to sanity-check the channel)

        Returns
        -------
        data contained in each sample, in the following sequence:
            channel, ntp_time, sample_data, power_data, angle_data
        """
        # log.trace('Processing one sample from input_file: %r', input_file)
        # print('Processing one sample from input_file')
        # Read and unpack the Sample Datagram into numpy array
        sample_data = np.fromfile(input_file, dtype=self.sample_dtype, count=1)
        channel = sample_data['channel_number'][0]

        # Check for a valid channel number that is within the number of transducers config
        # to prevent incorrectly indexing into the dictionaries.
        # An out of bounds channel number can indicate invalid, corrupt,
        # or misaligned datagram or a reverse byte order binary data file.
        # Log warning and continue to try and process the rest of the file.
        if channel < 0 or channel > transducer_count:
            print('Invalid channel: %s for transducer count: %s. \n\
            Possible file corruption or format incompatibility.' % (channel, transducer_count))

        # Convert high and low bytes to internal time
        windows_time = self._build_windows_time(sample_data['high_date_time'][0], sample_data['low_date_time'][0])
        ntp_time = self._windows_to_ntp(windows_time)

        count = sample_data['count'][0]

        # Extract array of power data
        power_data = np.fromfile(input_file, dtype=self.power_dtype, count=count).astype('f8')

        # Read the athwartship and alongship angle measurements
        # (mode > 1 indicates split-beam data; single-beam has no angles)
        if sample_data['mode'][0] > 1:
            angle_data = np.fromfile(input_file, dtype=self.angle_dtype, count=count)
        else:
            angle_data = []

        # Read and compare length1 (from beginning of datagram) to length2
        # (from the end of datagram). A mismatch can indicate an invalid, corrupt,
        # or misaligned datagram or a reverse byte order binary data file.
        # Log warning and continue to try and process the rest of the file.
        len_dtype = np.dtype([('length2', '<i4')])  # 4 byte int (long)
        length2_data = np.fromfile(input_file, dtype=len_dtype, count=1)
        if not (sample_data['length1'][0] == length2_data['length2'][0]):
            print('Mismatching beginning and end length values in sample datagram: \n\
            length1: %d, length2: %d.\n\
            Possible file corruption or format incompatibility.'
                  % (sample_data['length1'][0], length2_data['length2'][0]))

        return channel, ntp_time, sample_data, power_data, angle_data
@staticmethod
def append_metadata(metadata, channel, sample_data):
"""Store metadata when reading the first ping of all channels.
Parameters
----------
metadata
first_ping_metadata[channel] to be saved to
channel
channel from which metadata is being read
sample_data
unpacked sample data from process_sample()
"""
# Fixed across ping
metadata['channel'].append(channel)
metadata['transducer_depth'].append(sample_data['transducer_depth'][0]) # [meters]
metadata['frequency'].append(sample_data['frequency'][0]) # [Hz]
metadata['sound_velocity'].append(sample_data['sound_velocity'][0]) # [m/s]
metadata['absorption_coeff'].append(sample_data['absorption_coefficient'][0]) # [dB/m]
metadata['temperature'].append(sample_data['temperature'][0]) # [degC]
metadata['mode'].append(sample_data['mode'][0]) # >1: split-beam, 0: single-beam
return metadata # this may be removed?
def load_ek60_raw(self):
    """Parse the *.raw file into in-memory arrays.

    Reads the configuration header, then scans the file block-by-block for
    sample datagrams. Samples are accumulated per channel until a full ping
    (all transducer channels) has been read, at which point the ping is
    appended to the output dictionaries. Results are stored on ``self``:
    first_ping_metadata, data_times, motion, power_data_dict,
    angle_data_dict, tr_data_dict.
    """
    print('%s converting file: %s' % (dt.now().strftime('%H:%M:%S'), os.path.basename(self.filename)))

    with open(self.filename, 'rb') as input_file:  # read ('r') input file using binary mode ('b')
        self.read_header(input_file)  # unpack info to self.config_header and self.config_transducer
        position = input_file.tell()

        # *_data_temp_dict are for storing different channels within each ping
        # content of *_temp_dict are saved to *_data_dict whenever all channels of the same ping are unpacked
        # see below comment "Check if we have enough records to produce a new row of data"

        # Initialize output structure
        first_ping_metadata = defaultdict(list)  # metadata for each channel
        power_data_dict = defaultdict(list)      # echo power
        angle_data_dict = defaultdict(list)      # alongship and athwartship electronic angle
        tr_data_dict = defaultdict(list)         # transmit signal metadata
        data_times = []                          # ping time
        motion = []                              # pitch, roll, heave

        # Read binary file a block at a time
        raw = input_file.read(self.BLOCK_SIZE)

        # Flag used to check if data are from the same ping
        last_time = None

        while len(raw) > 4:
            # We only care for the Sample datagrams, skip over all the other datagrams
            match = self.SAMPLE_MATCHER.search(raw)

            if match:
                # Offset by size of length value
                match_start = match.start() - self.LENGTH_SIZE

                # Seek to the position of the length data before the token to read into numpy array
                input_file.seek(position + match_start)

                # try:
                next_channel, next_time, next_sample, next_power, next_angle = \
                    self.process_sample(input_file, self.config_header['transducer_count'])  # read each sample

                # Check if it's from different channels within the same ping
                # next_time=last_time when it's the same ping but different channel
                if next_time != last_time:  # if data is from a new ping
                    # Clear out our temporary dictionaries and set the last time to this time
                    sample_temp_dict = defaultdict(list)
                    power_temp_dict = defaultdict(list)
                    angle_temp_dict = defaultdict(list)  # include both alongship and athwartship angle
                    last_time = next_time  # update ping time

                # Store this data
                sample_temp_dict[next_channel] = next_sample
                power_temp_dict[next_channel] = next_power
                angle_temp_dict[next_channel] = next_angle

                # Check if we have enough records to produce a new row of data
                # if yes this means that data from all transducer channels have been read for a particular ping
                # a new row of data means all channels of data from one ping
                # if only 2 channels of data were received but there are a total of 3 transducers,
                # the data are not stored in the final power_data_dict
                if len(sample_temp_dict) == len(power_temp_dict) == \
                        len(angle_temp_dict) == self.config_header['transducer_count']:
                    # if this is the first ping from all channels,
                    # create metadata particle and store the frequency / bin_size
                    if not power_data_dict:
                        # Initialize each channel to defaultdict
                        for channel in power_temp_dict:
                            first_ping_metadata[channel] = defaultdict(list)
                            angle_data_dict[channel] = []
                        # Fill in metadata for each channel
                        for channel, sample_data in sample_temp_dict.items():
                            self.append_metadata(first_ping_metadata[channel], channel, sample_data)

                    # Save data and metadata from each ping to *_data_dict
                    data_times.append(next_time)
                    motion.append(np.array([(sample_temp_dict[1]['heave'],  # all channels have the same motion
                                             sample_temp_dict[1]['pitch'],
                                             sample_temp_dict[1]['roll'])],
                                           dtype=[('heave', 'f4'), ('pitch', 'f4'), ('roll', 'f4')]))
                    for channel in power_temp_dict:
                        power_data_dict[channel].append(power_temp_dict[channel])
                        if any(angle_temp_dict[channel]):  # if split-beam data
                            angle_data_dict[channel].append(angle_temp_dict[channel])
                        tr = np.array([(sample_temp_dict[channel]['frequency'],
                                        sample_temp_dict[channel]['transmit_power'],
                                        sample_temp_dict[channel]['pulse_length'],
                                        sample_temp_dict[channel]['bandwidth'],
                                        sample_temp_dict[channel]['sample_interval'])],
                                      dtype=[('frequency', 'f4'), ('transmit_power', 'f4'),
                                             ('pulse_length', 'f4'), ('bandwidth', 'f4'),
                                             ('sample_interval', 'f4')])
                        tr_data_dict[channel].append(tr)
                # except InvalidTransducer:
                #     pass

            else:
                # No sample datagram in this block; jump to the next block,
                # backing up 4 bytes so a token spanning the boundary is not missed.
                input_file.seek(position + self.BLOCK_SIZE - 4)

            # Need current position in file to increment for next regex search offset
            position = input_file.tell()
            # Read the next block for regex search
            raw = input_file.read(self.BLOCK_SIZE)

    data_times = np.array(data_times)

    # Convert to numpy array and decompress power data to dB
    # (raw power is stored compressed; scale by 10*log10(2)/256)
    for channel in power_data_dict:
        power_data_dict[channel] = np.array(power_data_dict[channel]) * 10. * np.log10(2) / 256.
        if angle_data_dict[channel]:  # if split-beam data
            angle_data_dict[channel] = np.array(angle_data_dict[channel])
        else:  # if single-beam data
            angle_data_dict[channel] = []
        tr_data_dict[channel] = np.array(tr_data_dict[channel])

    self.first_ping_metadata = first_ping_metadata
    self.data_times = data_times
    self.motion = motion
    self.power_data_dict = power_data_dict
    self.angle_data_dict = angle_data_dict
    self.tr_data_dict = tr_data_dict
def raw2nc(self):
    """Save data from RAW to netCDF format.

    Loads the raw file via load_ek60_raw(), then writes one netCDF group per
    SONAR-netCDF4 section (top-level, environment, provenance, platform,
    sonar, beam). Conversion is skipped if the .nc file already exists.
    """
    # Subfunctions to set various dictionaries
    def _set_toplevel_dict():
        # Top-level file attributes per the SONAR-netCDF4 / ACDD conventions.
        attrs = ('Conventions', 'keywords',
                 'sonar_convention_authority', 'sonar_convention_name',
                 'sonar_convention_version', 'summary', 'title')
        vals = ('CF-1.7, SONAR-netCDF4, ACDD-1.3', 'EK60',
                'ICES', 'SONAR-netCDF4', '1.7',
                '', '')
        out_dict = dict(zip(attrs, vals))
        # Creation date is parsed out of the raw filename (see FILENAME_MATCHER).
        out_dict['date_created'] = dt.strptime(fm.group('date') + '-' + fm.group('time'),
                                               '%Y%m%d-%H%M%S').isoformat() + 'Z'
        return out_dict

    def _set_env_dict():
        # Environment group: per-frequency absorption and sound speed.
        attrs = ('frequency', 'absorption_coeff', 'sound_speed')
        vals = (freq, abs_val, ss_val)
        return dict(zip(attrs, vals))

    def _set_prov_dict():
        # Provenance group: conversion software identification.
        attrs = ('conversion_software_name', 'conversion_software_version', 'conversion_time')
        vals = ('echopype', ECHOPYPE_VERSION, dt.now(tz=pytz.utc).isoformat(timespec='seconds'))  # use UTC time
        return dict(zip(attrs, vals))

    def _set_sonar_dict():
        # Sonar group: instrument identification from the config header.
        attrs = ('sonar_manufacturer', 'sonar_model', 'sonar_serial_number',
                 'sonar_software_name', 'sonar_software_version', 'sonar_type')
        vals = ('Simrad', self.config_header['sounder_name'].decode('utf-8'), '',
                '', self.config_header['version'].decode('utf-8'), 'echosounder')
        return dict(zip(attrs, vals))

    def _set_platform_dict():
        out_dict = dict()
        out_dict['platform_name'] = self.config_header['survey_name'].decode('utf-8')
        if re.search('OOI', out_dict['platform_name']):
            out_dict['platform_type'] = 'subsurface mooring'  # if OOI
        else:
            out_dict['platform_type'] = 'ship'  # default to ship
        out_dict['time'] = self.data_times  # [seconds since 1900-01-01] for xarray.to_netcdf conversion
        out_dict['pitch'] = np.array([x['pitch'] for x in self.motion.__iter__()], dtype='float32').squeeze()
        out_dict['roll'] = np.array([x['roll'] for x in self.motion.__iter__()], dtype='float32').squeeze()
        out_dict['heave'] = np.array([x['heave'] for x in self.motion.__iter__()], dtype='float32').squeeze()
        # water_level is set to 0 for EK60 since this is not separately recorded
        # and is part of transducer_depth
        out_dict['water_level'] = np.int32(0)
        return out_dict

    def _set_beam_dict():
        beam_dict = dict()
        beam_dict['beam_mode'] = 'vertical'
        beam_dict['conversion_equation_t'] = 'type_3'  # type_3 is EK60 conversion
        beam_dict['ping_time'] = self.data_times  # [seconds since 1900-01-01] for xarray.to_netcdf conversion
        beam_dict['backscatter_r'] = np.array([self.power_data_dict[x] for x in self.power_data_dict])
        beam_dict['frequency'] = freq  # added by echopype, not in convention
        beam_dict['range_bin'] = np.arange(self.power_data_dict[1].shape[1])  # added by echopype, not in convention

        # Loop through each transducer for variables that are the same for each file
        bm_width = defaultdict(lambda: np.zeros(shape=(tx_num,), dtype='float32'))
        bm_dir = defaultdict(lambda: np.zeros(shape=(tx_num,), dtype='float32'))
        tx_pos = defaultdict(lambda: np.zeros(shape=(tx_num,), dtype='float32'))
        beam_dict['equivalent_beam_angle'] = np.zeros(shape=(tx_num,), dtype='float32')
        beam_dict['gain_correction'] = np.zeros(shape=(tx_num,), dtype='float32')
        beam_dict['gpt_software_version'] = []
        beam_dict['channel_id'] = []
        for c_seq, c in enumerate(self.config_transducer.__iter__()):
            bm_width['beamwidth_receive_major'][c_seq] = c['beam_width_alongship']
            bm_width['beamwidth_receive_minor'][c_seq] = c['beam_width_athwartship']
            bm_width['beamwidth_transmit_major'][c_seq] = c['beam_width_alongship']
            bm_width['beamwidth_transmit_minor'][c_seq] = c['beam_width_athwartship']
            bm_dir['beam_direction_x'][c_seq] = c['dir_x']
            bm_dir['beam_direction_y'][c_seq] = c['dir_y']
            bm_dir['beam_direction_z'][c_seq] = c['dir_z']
            tx_pos['transducer_offset_x'][c_seq] = c['pos_x']
            tx_pos['transducer_offset_y'][c_seq] = c['pos_y']
            # transducer z offset includes the recorded transducer depth
            tx_pos['transducer_offset_z'][c_seq] = c['pos_z'] + self.first_ping_metadata[c_seq+1]['transducer_depth'][0]
            beam_dict['equivalent_beam_angle'][c_seq] = c['equiv_beam_angle']
            beam_dict['gain_correction'][c_seq] = c['gain']
            beam_dict['gpt_software_version'].append(c['gpt_software_version'].decode('utf-8'))
            beam_dict['channel_id'].append(c['channel_id'].decode('utf-8'))

        # Loop through each transducer for variables that may vary at each ping
        # -- this rarely is the case for EK60 so we check first before saving
        pl_tmp = np.unique(self.tr_data_dict[1]['pulse_length']).size
        pw_tmp = np.unique(self.tr_data_dict[1]['transmit_power']).size
        bw_tmp = np.unique(self.tr_data_dict[1]['bandwidth']).size
        si_tmp = np.unique(self.tr_data_dict[1]['sample_interval']).size
        if pl_tmp==1 and pw_tmp==1 and bw_tmp==1 and si_tmp==1:
            # Constant across pings: store one value per transducer.
            tx_sig = defaultdict(lambda: np.zeros(shape=(tx_num,), dtype='float32'))
            beam_dict['sample_interval'] = np.zeros(shape=(tx_num,), dtype='float32')
            for t_seq in range(tx_num):
                tx_sig['transmit_duration_nominal'][t_seq] = self.tr_data_dict[t_seq + 1]['pulse_length'][0]
                tx_sig['transmit_power'][t_seq] = self.tr_data_dict[t_seq + 1]['transmit_power'][0]
                tx_sig['transmit_bandwidth'][t_seq] = self.tr_data_dict[t_seq + 1]['bandwidth'][0]
                beam_dict['sample_interval'][t_seq] = self.tr_data_dict[t_seq + 1]['sample_interval'][0]
        else:
            # Varies across pings: store one value per transducer per ping.
            tx_sig = defaultdict(lambda: np.zeros(shape=(tx_num, ping_num), dtype='float32'))
            beam_dict['sample_interval'] = np.zeros(shape=(tx_num, ping_num), dtype='float32')
            for t_seq in range(tx_num):
                tx_sig['transmit_duration_nominal'][t_seq, :] = self.tr_data_dict[t_seq + 1]['pulse_length'].squeeze()
                tx_sig['transmit_power'][t_seq, :] = self.tr_data_dict[t_seq + 1]['transmit_power'].squeeze()
                tx_sig['transmit_bandwidth'][t_seq, :] = self.tr_data_dict[t_seq + 1]['bandwidth'].squeeze()
                beam_dict['sample_interval'][t_seq, :] = self.tr_data_dict[t_seq + 1]['sample_interval'].squeeze()

        # Build other parameters
        beam_dict['non_quantitative_processing'] = np.array([0, ] * freq.size, dtype='int32')
        # -- sample_time_offset is set to 2 for EK60 data, this value is NOT from sample_data['offset']
        beam_dict['sample_time_offset'] = np.array([2, ] * freq.size, dtype='int32')
        # Index of the pulse length actually used, looked up in each
        # transducer's pulse_length_table, to select the matching sa_correction.
        idx = [np.argwhere(self.tr_data_dict[x + 1]['pulse_length'][0] ==
                           self.config_transducer[x]['pulse_length_table']).squeeze()
               for x in range(len(self.config_transducer))]
        beam_dict['sa_correction'] = np.array([x['sa_correction_table'][y]
                                               for x, y in zip(self.config_transducer.__iter__(), np.array(idx))])

        return beam_dict, bm_width, bm_dir, tx_pos, tx_sig

    # Load data from RAW file
    self.load_ek60_raw()

    # Get nc filename
    filename = os.path.splitext(os.path.basename(self.filename))[0]
    self.nc_path = os.path.join(os.path.split(self.filename)[0], filename + '.nc')
    fm = self.FILENAME_MATCHER.match(self.filename)

    # Check if nc file already exists
    # ... if yes, abort conversion and issue warning
    # ... if not, continue with conversion
    if os.path.exists(self.nc_path):
        print(' ... this file has already been converted to .nc, conversion not executed.')
    else:
        # Retrieve variables
        tx_num = self.config_header['transducer_count']
        ping_num = self.data_times.size
        freq = np.array([x['frequency'][0] for x in self.first_ping_metadata.values()], dtype='float32')
        abs_val = np.array([x['absorption_coeff'][0] for x in self.first_ping_metadata.values()], dtype='float32')
        ss_val = np.array([x['sound_velocity'][0] for x in self.first_ping_metadata.values()], dtype='float32')

        # Create SetGroups object
        grp = SetGroups(file_path=self.nc_path)
        grp.set_toplevel(_set_toplevel_dict())  # top-level group
        grp.set_env(_set_env_dict())  # environment group
        grp.set_provenance(os.path.basename(self.filename),
                           _set_prov_dict())  # provenance group
        grp.set_platform(_set_platform_dict())  # platform group
        grp.set_sonar(_set_sonar_dict())  # sonar group
        grp.set_beam(*_set_beam_dict())  # beam group
| StarcoderdataPython |
1689206 | <gh_stars>1-10
import mailbox
import sys
from sets import Set
from operator import itemgetter
import time
import email.utils
import re
import os
from git import Repo
import atexit
import shutil
import smtplib
from email.mime.text import MIMEText
# Default notification template for a failed apply.
# NOTE(review): this default contains no %s placeholder, but it is used as
# "email_message_failed % lastmid" below -- presumably mail2gitconfig
# overrides it (the star-import comes after this definition); verify.
email_message_failed = '''Patch set failed to apply to the current HEAD.
--
Generated by https://github.com/haraldh/mail2git
'''

# Site configuration (mailbox_file, email_from, email_to, email_message,
# email_message_ok, ...) comes from the local config module.
from mail2gitconfig import *

# Fall back to the generic message when no explicit "ok" message is set.
if email_message and not email_message_ok:
    email_message_ok = email_message

# Open the SMTP connection only when notifications are enabled.
if email_to:
    smtp = smtplib.SMTP('localhost')

# Per-process working directory for temporary mbox files.
MAIL2GIT_VARDIR = "/var/tmp/mail2git-%d" % os.getpid()

mid = {}      # Message-ID -> email.Message
kid = {}      # Message-ID -> mailbox key
threads = {}  # thread root Message-ID -> set/list of member Message-IDs

# Matches "[PATCH ... n/N]" subjects, capturing N (the patch-set size).
patch_pattern = re.compile(r'[PATCH.*[^\]]*\s*\d+/(\d+)\s*\]')
# Heuristic for a unified diff body (index/---/+++/@@ lines).
diff_pattern = re.compile(r'^index.*\n---.*\n\+\+\+.*\n@@ .*', re.MULTILINE)
def check_complete(thread):
    """Return the ordered message-ids of a complete patch set, or None.

    Looks for a "[PATCH n/N]" numbering in the thread's subjects. If no
    numbering is found the thread is treated as a single patch and only its
    first message is returned. Otherwise every part 1..N must be present;
    if any part is missing, None is returned.
    """
    total_parts = 0
    numbering_found = False
    for msg_id in thread:
        subject = mid[msg_id]['Subject'].replace('\n', ' ').replace('\r', '')
        numbered = patch_pattern.search(subject)
        if numbered:
            total_parts = int(numbered.group(1))
            numbering_found = True
            break
    if not numbering_found:
        # No "n/N" numbering anywhere: single-patch thread.
        return [thread[0]]
    if total_parts < 1:
        return None
    ordered = []
    for part in range(1, total_parts + 1):
        part_re = re.compile('\\[PATCH.*%d\\/%d\\s*\\]' % (part, total_parts))
        part_id = None
        for msg_id in thread:
            subject = mid[msg_id]['Subject'].replace('\n', ' ').replace('\r', '')
            if part_re.search(subject):
                part_id = msg_id
                break
        if part_id is None:
            # A part of the series is missing: incomplete set.
            return None
        ordered.append(part_id)
    return ordered
def cleanup_maildir():
    # Remove the per-process temp directory on exit; ignore_errors covers
    # the case where it was never created.
    shutil.rmtree(MAIL2GIT_VARDIR, ignore_errors=True)

atexit.register(cleanup_maildir)

# Open the incoming mailbox; it stays locked for the whole run.
inbox = mailbox.mbox(mailbox_file)

def inbox_unlock():
    # Flush pending discards and release the lock on exit.
    global inbox
    inbox.flush()
    inbox.unlock()

atexit.register(inbox_unlock)

inbox.lock()
# Scan the inbox: index every parseable message, drop everything that is
# not a patch email, and group patch messages into threads by In-Reply-To.
for key in inbox.iterkeys():
    try:
        message = inbox[key]
    except email.errors.MessageParseError:
        # Unparseable message: drop it.
        inbox.discard(key)
        continue
    i = message['Message-ID']
    mid[i] = message
    kid[i] = key
    # Keep only messages whose subject mentions [PATCH ...].
    if message['subject'].find("[PATCH") == -1:
        inbox.discard(key)
        continue
    # Require an actual diff in the body.
    # search also with MIME decode
    if not diff_pattern.search(message.as_string()):
        if message.is_multipart():
            # Check each MIME part for a diff; discard if none contains one.
            for m in message.get_payload(decode=True) or []:
                if diff_pattern.search(m.get_payload(decode=True)):
                    break
            else:
                inbox.discard(key)
                continue
        else:
            if not diff_pattern.search(message.get_payload(decode=True)):
                inbox.discard(key)
                continue
    s = message['Subject'].replace('\n', ' ').replace('\r', '')
    # A numbered reply ("[PATCH n/N]" with In-Reply-To) joins its parent's
    # thread, keyed by the parent Message-ID.
    if message.has_key('In-Reply-To') and patch_pattern.search(s):
        if not threads.has_key(message['In-Reply-To']):
            threads[message['In-Reply-To']] = Set()
        threads[message['In-Reply-To']].add(message['In-Reply-To'])
        threads[message['In-Reply-To']].add(i)
        #print "Adding %s" % s
        continue
    # Numbered patches that are not part 1 never start a new thread.
    if patch_pattern.match(s) and not re.search("\[PATCH.*[^\]]*\s*1/(\d+)\s*\]", s):
        continue
    # Otherwise this message starts its own thread.
    if not threads.has_key(i):
        threads[i] = Set()
    #print "Adding %s" % s
    threads[i].add(i)
# remove all threads without the starting message
# remove all threads with incomplete patch set
for t in threads.keys():
    if not mid.has_key(t):
        #print "Removing %s" % t
        del threads[t]
        continue

# sort the messages of each thread by date
for t in threads.keys():
    # Bug fix: the date must be taken from each member message (mid[f]),
    # not from the thread root (mid[t]) which gave every tuple the same
    # timestamp; and the sort key must be the timestamp (element 0), not
    # the message-id (element 1).
    l = [(email.utils.mktime_tz(email.utils.parsedate_tz(mid[f]['date'])), f)
         for f in threads[t]]
    l.sort(key=itemgetter(0))
    threads[t] = [b for a, b in l]

# Keep only complete patch sets, in part order (see check_complete()).
for t in threads.keys():
    ret = check_complete(threads[t])
    if not ret:
        #print "Deleting %s" % t
        del threads[t]
        continue
    #print "Saving %s" % ret
    threads[t] = ret
if not os.path.isdir(MAIL2GIT_VARDIR):
    os.mkdir(MAIL2GIT_VARDIR)

# print threads
# Write each complete thread to its own temporary mbox file (named after
# the last message's Message-ID, brackets stripped) and drop the messages
# from the inbox.
for t in threads.keys():
    lastmid = threads[t][-1][1:-1]
    mboxfile = '%s/%s' % (MAIL2GIT_VARDIR, lastmid)
    if not os.path.isfile(mboxfile):
        box = mailbox.mbox(mboxfile)
        box.lock()
        for b in threads[t]:
            message = mid[b]
            box.add(message)
            #print message['subject']
        box.flush()
        box.unlock()
        box.close()
    # NOTE(review): indentation reconstructed -- the discard loop is placed
    # at thread level so complete threads are removed from the inbox even
    # when the mbox file already exists; confirm against upstream.
    for b in threads[t]:
        key = kid[b]
        inbox.discard(key)
# Apply each saved patch set on a fresh branch off master; notify the
# sender about success or failure when email_to is configured.
repo = Repo(".")
git = repo.git
repo.heads.master.checkout()
git.pull("--all", "--prune")

for t in threads.keys():
    lastmid = threads[t][-1][1:-1]
    lastsubject = mid[threads[t][-1]]['Subject']
    mboxfile = '%s/%s' % (MAIL2GIT_VARDIR, lastmid)
    if not os.path.isfile(mboxfile):
        continue
    # check if branch already exists
    if lastmid in repo.heads or "refs/remotes/origin/%s" % lastmid in repo.refs:
        #repo.delete_head(lastmid, "-D")
        print "Branch %s does already exist" % lastmid
        continue
    # Branch off master, named after the last message's Message-ID.
    repo.heads.master.checkout()
    new_branch = repo.create_head(lastmid, repo.heads.master)
    new_branch.checkout()
    try:
        #if os.system("git am %s" % mboxfile) != 0:
        #    raise Exception("test")
        git.am(mboxfile, "--scissors")
        print "[OK] %s" % lastmid
    except:
        # Apply failed: abort the am session, drop the branch, notify.
        print "[FAILED] %s" % lastmid
        try:
            git.am("--abort")
        except:
            pass
        repo.heads.master.checkout()
        repo.delete_head(lastmid, "-D")
        if email_to and email_message_failed:
            # Reply in-thread to the last message of the patch set.
            msg = MIMEText(email_message_failed % lastmid.replace("@", "%40"))
            msg.add_header('In-Reply-To', '<' + lastmid + '>')
            msg.add_header('References', '<' + lastmid + '>')
            msg['From'] = email_from
            msg['To' ] = email_to
            msg['Subject'] = "Re: " + lastsubject
            msg['Date'] = email.utils.formatdate()
            msg['Message-ID'] = email.utils.make_msgid('githubbot')
            smtp.sendmail(email_from, [email_to], msg.as_string())
        continue
    else:
        # Apply succeeded: send the success notification.
        if email_to and email_message_ok:
            msg = MIMEText(email_message_ok % lastmid.replace("@", "%40"))
            msg.add_header('In-Reply-To', '<' + lastmid + '>')
            msg.add_header('References', '<' + lastmid + '>')
            msg['From'] = email_from
            msg['To' ] = email_to
            msg['Subject'] = "Re: " + lastsubject
            msg['Date'] = email.utils.formatdate()
            msg['Message-ID'] = email.utils.make_msgid('githubbot')
            smtp.sendmail(email_from, [email_to], msg.as_string())

# NOTE(review): smtp is only created when email_to is set (see setup);
# this unconditional quit() would raise NameError otherwise -- verify.
smtp.quit()

repo.heads.master.checkout()
git.push("--all")
# Now remove all messages older than a day
for key in inbox.iterkeys():
    message = inbox[key]
    # Message age from the Date: header (seconds since epoch).
    mtime = email.utils.mktime_tz(email.utils.parsedate_tz(message['date']))
    ltime = time.time()
    if (ltime - mtime) > 86400:
        inbox.discard(key)
| StarcoderdataPython |
3307356 | <reponame>trh0ly/Derivate
"""
Dieses Beispiel stammt aus dem Buch "Python for Finance - Second Edition" von Yuxing Yan: https://www.packtpub.com/big-data-and-business-intelligence/python-finance-second-edition
Sämtliche Beispiele sind in leicht abgewandelter Form zu finden unter: https://github.com/PacktPublishing/Python-for-Finance-Second-Edition
MIT License
Copyright (c) 2017 Packt
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def alternative_smile(filename='calls17march.txt'):
    """Plot the implied-volatility smile for IBM calls expiring 2017-03-17.

    Reads a tab-delimited option-chain file with 'Strike' and
    'Implied Volatility' columns (the latter as percentage strings such as
    '12.34%') and plots volatility against strike price.

    Parameters
    ----------
    filename : str
        Path of the option-chain file; defaults to the original hard-coded
        file, so existing callers are unaffected.
    """
    data2 = pd.read_csv(filename,
                        delimiter='\t',
                        skiprows=1,
                        usecols=['Strike', 'Implied Volatility'])
    x = data2['Strike']
    # Convert percentage strings like '12.34%' into fractions (0.1234);
    # replaces the original manual index loop over np.arange(n).
    y = [float(v.replace("%", "")) / 100.0 for v in data2['Implied Volatility']]
    plt.title('Volatility smile - IBM Calls mit Fälligkeit: 3/17/2017')
    plt.ylabel('Volatilität')
    plt.xlabel('Preis des Strikes')
    plt.plot(x, y, 'o')
    plt.grid()
    plt.show()
if __name__ == "__main__":
    # Run the demo only when executed as a script; importing this module
    # no longer triggers file I/O and plotting as a side effect.
    alternative_smile()
| StarcoderdataPython |
17675 | from __future__ import division, print_function, absolute_import
from .core import SeqletCoordinates
from modisco import util
import numpy as np
from collections import defaultdict, Counter, OrderedDict
import itertools
import sys
import time
from .value_provider import (
AbstractValTransformer, AbsPercentileValTransformer,
SignedPercentileValTransformer, PrecisionValTransformer)
import scipy
from sklearn.isotonic import IsotonicRegression
# Maximum number of values kept when subsampling very large score arrays
# (see subsample_if_large).
SUBSAMPLE_CAP = 1000000
#The only parts of TransformAndThresholdResults used in TfModiscoWorkflow
# are the transformed_pos/neg_thresholds and the val_transformer
# (the latter is used in metaclustering with multiple tasks).
#TransformAndThresholdResults is also used to replicate the same
# coordinate-identification procedure that was applied when TfMoDisco was
# first run; the information needed for that is specific to the type of
# CoordProducer used.
class AbstractTransformAndThresholdResults(object):
    """Base class holding the transformed thresholds and value transformer
    produced by a coordinate producer's thresholding step."""

    def __init__(self, transformed_neg_threshold, transformed_pos_threshold,
                 val_transformer):
        # Thresholds expressed in the transformed (e.g. cdf) space.
        self.transformed_neg_threshold = transformed_neg_threshold
        self.transformed_pos_threshold = transformed_pos_threshold
        # AbstractValTransformer used to map raw values into that space.
        self.val_transformer = val_transformer

    @classmethod
    def from_hdf5(cls, grp):
        # Dispatch deserialization to the concrete class recorded in the
        # group; legacy files without a "class" attribute default to
        # FWACTransformAndThresholdResults.
        # NOTE(review): eval() on the stored class name assumes a trusted
        # file; also, if the stored class equals this abstract class the
        # method falls through and implicitly returns None -- verify that
        # only concrete class names are ever stored.
        if "class" not in grp.attrs:
            the_class = FWACTransformAndThresholdResults
        else:
            the_class = eval(grp.attrs["class"])
        if (the_class.__name__ != cls.__name__):
            return the_class.from_hdf5(grp)
class BasicTransformAndThresholdResults(AbstractTransformAndThresholdResults):
    """Concrete TransformAndThresholdResults with hdf5 (de)serialization of
    the transformed thresholds and the value transformer."""

    def save_hdf5(self, grp):
        """Serialize this object's class name, thresholds and transformer
        into the given hdf5 group."""
        grp.attrs["class"] = type(self).__name__
        grp.attrs["transformed_pos_threshold"] = self.transformed_pos_threshold
        grp.attrs["transformed_neg_threshold"] = self.transformed_neg_threshold
        self.val_transformer.save_hdf5(grp.create_group("val_transformer"))

    @classmethod
    def load_basic_attrs_from_hdf5(cls, grp):
        """Read the attributes common to all subclasses from *grp*."""
        neg_thresh = grp.attrs['transformed_neg_threshold']
        pos_thresh = grp.attrs['transformed_pos_threshold']
        transformer = AbstractValTransformer.from_hdf5(grp["val_transformer"])
        return (neg_thresh, pos_thresh, transformer)

    @classmethod
    def from_hdf5(cls, grp):
        """Reconstruct an instance from an hdf5 group."""
        # Resolving the stored class name (kept from the original code; it
        # raises early if the recorded class is unknown).
        the_class = eval(grp.attrs["class"])
        neg_thresh, pos_thresh, transformer = \
            cls.load_basic_attrs_from_hdf5(grp)
        return cls(transformed_neg_threshold=neg_thresh,
                   transformed_pos_threshold=pos_thresh,
                   val_transformer=transformer)
#FWAC = FixedWindowAroundChunks; this TransformAndThresholdResults object
# is specific to the type of info needed in that case.
class FWACTransformAndThresholdResults(BasicTransformAndThresholdResults):
    """Thresholding results for the FixedWindowAroundChunks coordinate
    producer; additionally records the raw (untransformed) thresholds."""

    def __init__(self, neg_threshold,
                 transformed_neg_threshold,
                 pos_threshold,
                 transformed_pos_threshold,
                 val_transformer):
        # Both transformed thresholds are positive values relative to the
        # transformed distribution used to set the threshold (e.g. a cdf
        # value); the raw thresholds are kept alongside them.
        self.neg_threshold = neg_threshold
        self.pos_threshold = pos_threshold
        super(FWACTransformAndThresholdResults, self).__init__(
            transformed_neg_threshold=transformed_neg_threshold,
            transformed_pos_threshold=transformed_pos_threshold,
            val_transformer=val_transformer)

    def save_hdf5(self, grp):
        """Serialize the base attributes plus the raw thresholds."""
        super(FWACTransformAndThresholdResults, self).save_hdf5(grp)
        grp.attrs["pos_threshold"] = self.pos_threshold
        grp.attrs["neg_threshold"] = self.neg_threshold

    @classmethod
    def from_hdf5(cls, grp):
        """Reconstruct an instance from an hdf5 group."""
        (transformed_neg, transformed_pos,
         transformer) = cls.load_basic_attrs_from_hdf5(grp)
        return cls(neg_threshold=grp.attrs['neg_threshold'],
                   transformed_neg_threshold=transformed_neg,
                   pos_threshold=grp.attrs['pos_threshold'],
                   transformed_pos_threshold=transformed_pos,
                   val_transformer=transformer)
class AbstractCoordProducer(object):
    """Interface for objects that produce seqlet coordinates."""

    def __call__(self):
        raise NotImplementedError()

    @classmethod
    def from_hdf5(cls, grp):
        # Dispatch deserialization to the concrete class named in the group.
        # NOTE(review): eval() on the stored class name assumes trusted input.
        the_class = eval(grp.attrs["class"])
        return the_class.from_hdf5(grp)
class SeqletCoordsFWAP(SeqletCoordinates):
    """
    Coordinates for the FixedWindowAroundChunks CoordProducer
    """
    def __init__(self, example_idx, start, end, score, other_info=None):
        """
        example_idx/start/end: coordinate of the seqlet (never revcomp'd).
        score: window score that produced this coordinate.
        other_info: optional dict of auxiliary data; defaults to a fresh
            empty dict per instance.
        """
        self.score = score
        # Bug fix: the original used a mutable default ({}), which is a
        # single dict shared by every instance constructed without
        # other_info; use a None sentinel and allocate per instance.
        self.other_info = {} if other_info is None else other_info
        super(SeqletCoordsFWAP, self).__init__(
            example_idx=example_idx,
            start=start, end=end,
            is_revcomp=False)
class CoordProducerResults(object):
    """Bundle of the seqlet coordinates produced by a coordinate producer
    together with the transform-and-threshold results used to derive them."""

    def __init__(self, coords, tnt_results):
        self.coords = coords
        self.tnt_results = tnt_results

    @classmethod
    def from_hdf5(cls, grp):
        """Reconstruct coordinates and threshold results from *grp*."""
        serialized_coords = util.load_string_list(dset_name="coords",
                                                  grp=grp)
        return CoordProducerResults(
            coords=[SeqletCoordinates.from_string(s)
                    for s in serialized_coords],
            tnt_results=AbstractTransformAndThresholdResults.from_hdf5(
                grp["tnt_results"]))

    def save_hdf5(self, grp):
        """Serialize coordinates (as strings) and threshold results."""
        coord_strings = [str(coord) for coord in self.coords]
        util.save_string_list(string_list=coord_strings,
                              dset_name="coords",
                              grp=grp)
        self.tnt_results.save_hdf5(
            grp=grp.create_group("tnt_results"))
def get_simple_window_sum_function(window_size):
    """Return a function computing sliding-window sums of width *window_size*.

    The returned function maps a list of 1-d arrays to a list of arrays in
    which entry i holds sum(arr[i:i+window_size]).
    """
    def window_sum_function(arrs):
        summed = []
        for arr in arrs:
            # Prepend a zero to the cumulative sum so the difference of two
            # offsets yields sums over half-open windows of the given width.
            padded_cumsum = np.concatenate(([0], np.cumsum(arr)))
            summed.append(padded_cumsum[window_size:]
                          - padded_cumsum[:-window_size])
        return summed
    return window_sum_function
class GenerateNullDist(object):
    """Interface for null-distribution generators: given a list of score
    tracks, produce values representing the null distribution."""

    def __call__(self, score_track):
        raise NotImplementedError()
class TakeSign(GenerateNullDist):
    """Null generator that reduces every score to its sign (-1, 0, +1)."""

    @classmethod
    def from_hdf5(cls, grp):
        raise NotImplementedError()

    def save_hdf(cls, grp):
        raise NotImplementedError()

    def __call__(self, score_track):
        # Apply np.sign track-by-track, keeping the list structure.
        return list(map(np.sign, score_track))
class TakeAbs(GenerateNullDist):
    """Null generator that replaces every score with its absolute value."""

    @classmethod
    def from_hdf5(cls, grp):
        raise NotImplementedError()

    def save_hdf(cls, grp):
        raise NotImplementedError()

    def __call__(self, score_track):
        # Apply np.abs track-by-track, keeping the list structure.
        return list(map(np.abs, score_track))
class LaplaceNullDist(GenerateNullDist):
    """Generate null values by fitting a two-sided Laplace-like distribution.

    The mode (mu) of the observed window-sum distribution is located with a
    two-level histogram; exponential rates are then fit separately to the
    positive and negative tails (taking the most aggressive rate over the
    configured percentiles), and num_to_samp values are drawn from the
    resulting asymmetric two-sided exponential.
    """

    def __init__(self, num_to_samp, verbose=True,
                 percentiles_to_use=tuple(5 * (x + 1) for x in range(19)),
                 random_seed=1234):
        """
        num_to_samp: number of null values to sample.
        percentiles_to_use: percentiles at which the exponential rate is
            estimated. (Changed from a mutable list default to an
            equivalent tuple; same values 5, 10, ..., 95.)
        random_seed: seed applied to the RNG on every __call__, making the
            sampling deterministic per instance configuration.
        """
        self.num_to_samp = num_to_samp
        self.verbose = verbose
        self.percentiles_to_use = np.array(percentiles_to_use)
        self.random_seed = random_seed
        self.rng = np.random.RandomState()

    @classmethod
    def from_hdf5(cls, grp):
        """Reconstruct an instance from an hdf5 group."""
        num_to_samp = grp.attrs["num_to_samp"]
        verbose = grp.attrs["verbose"]
        percentiles_to_use = np.array(grp["percentiles_to_use"][:])
        # Bug fix: percentiles_to_use was previously read from the file but
        # not passed to the constructor, so deserialized objects silently
        # reverted to the default percentiles.
        return cls(num_to_samp=num_to_samp, verbose=verbose,
                   percentiles_to_use=percentiles_to_use)

    def save_hdf5(self, grp):
        """Serialize configuration (random_seed is not persisted)."""
        grp.attrs["class"] = type(self).__name__
        grp.attrs["num_to_samp"] = self.num_to_samp
        grp.attrs["verbose"] = self.verbose
        grp.create_dataset('percentiles_to_use',
                           data=self.percentiles_to_use)

    def __call__(self, score_track, window_size, original_summed_score_track):
        # original_summed_score_track is supplied to avoid recomputing it
        if (original_summed_score_track is None):
            window_sum_function = get_simple_window_sum_function(window_size)
            original_summed_score_track = window_sum_function(arrs=score_track)
        values = np.concatenate(original_summed_score_track, axis=0)

        # First estimate mu: a two-level histogram narrows the mode down to
        # roughly 1e-6 of the data range.
        hist1, bin_edges1 = np.histogram(values, bins=1000)
        peak1 = np.argmax(hist1)
        l_edge = bin_edges1[peak1]
        r_edge = bin_edges1[peak1 + 1]
        top_values = values[(l_edge < values) & (values < r_edge)]
        hist2, bin_edges2 = np.histogram(top_values, bins=1000)
        peak2 = np.argmax(hist2)
        l_edge = bin_edges2[peak2]
        r_edge = bin_edges2[peak2 + 1]
        mu = (l_edge + r_edge) / 2
        if (self.verbose):
            print("peak(mu)=", mu)

        pos_values = [x for x in values if x >= mu]
        neg_values = [x for x in values if x <= mu]
        # For an exponential distribution:
        #   cdf = 1 - exp(-lambda*x)  =>  lambda = -log(1-cdf)/x
        # Take the most aggressive (largest) lambda over all percentiles.
        pos_laplace_lambda = np.max(
            -np.log(1 - (self.percentiles_to_use / 100.0)) /
            (np.percentile(a=pos_values, q=self.percentiles_to_use) - mu))
        neg_laplace_lambda = np.max(
            -np.log(1 - (self.percentiles_to_use / 100.0)) /
            (np.abs(np.percentile(a=neg_values,
                                  q=100 - self.percentiles_to_use) - mu)))
        self.rng.seed(self.random_seed)
        prob_pos = float(len(pos_values)) / (len(pos_values) + len(neg_values))
        sampled_vals = []
        for i in range(self.num_to_samp):
            # Choose a side at the empirical rate, then draw via the
            # inverse cdf of the exponential on that side.
            sign = 1 if (self.rng.uniform() < prob_pos) else -1
            if (sign == 1):
                sampled_cdf = self.rng.uniform()
                val = -np.log(1 - sampled_cdf) / pos_laplace_lambda + mu
            else:
                sampled_cdf = self.rng.uniform()
                val = mu + np.log(1 - sampled_cdf) / neg_laplace_lambda
            sampled_vals.append(val)
        return np.array(sampled_vals)
class FlipSignNullDist(GenerateNullDist):
    """Generate null values by randomly flipping the signs of retained scores.

    Positions whose window sums fall strictly between the lower and upper
    null percentiles of the observed distribution are retained; null tracks
    are then built by flipping each retained value's sign at the empirical
    positive rate (optionally shuffling positions), and the window sums of
    those null tracks are returned.
    """

    def __init__(self, num_seq_to_samp, shuffle_pos=False,
                 seed=1234, num_breaks=100,
                 lower_null_percentile=20,
                 upper_null_percentile=80):
        """
        num_seq_to_samp: number of null tracks to synthesize.
        shuffle_pos: also shuffle positions within each sampled track.
        seed: RNG seed, applied on every __call__ for determinism.
        """
        self.num_seq_to_samp = num_seq_to_samp
        self.shuffle_pos = shuffle_pos
        self.seed = seed
        self.rng = np.random.RandomState()
        self.num_breaks = num_breaks
        self.lower_null_percentile = lower_null_percentile
        self.upper_null_percentile = upper_null_percentile

    @classmethod
    def from_hdf5(cls, grp):
        raise NotImplementedError()

    def save_hdf(cls, grp):
        raise NotImplementedError()

    def __call__(self, score_track, windowsize, original_summed_score_track):
        # original_summed_score_track is supplied to avoid recomputing it.
        window_sum_function = get_simple_window_sum_function(windowsize)
        # Bug fix: the original tested "is not None", which recomputed the
        # summed track when it was already supplied and left it as None
        # (crashing below) when it was not supplied. The parallel code in
        # LaplaceNullDist uses "is None".
        if (original_summed_score_track is None):
            original_summed_score_track = window_sum_function(arrs=score_track)

        all_orig_summed_scores = np.concatenate(
            original_summed_score_track, axis=0)
        pos_threshold = np.percentile(a=all_orig_summed_scores,
                                      q=self.upper_null_percentile)
        neg_threshold = np.percentile(a=all_orig_summed_scores,
                                      q=self.lower_null_percentile)

        # Retain only the portions of the tracks whose covering windows all
        # fall strictly between the two thresholds.
        retained_track_portions = []
        num_pos_vals = 0
        num_neg_vals = 0
        for (single_score_track, single_summed_score_track)\
                in zip(score_track, original_summed_score_track):
            window_passing_track = [
                (1.0 if (x > neg_threshold and x < pos_threshold) else 0)
                for x in single_summed_score_track]
            # Pad so each position's count covers every window touching it.
            padded_window_passing_track = [0.0] * int(windowsize - 1)
            padded_window_passing_track.extend(window_passing_track)
            padded_window_passing_track.extend([0.0] * int(windowsize - 1))
            pos_in_passing_window = window_sum_function(
                [padded_window_passing_track])[0]
            assert len(single_score_track) == len(pos_in_passing_window)
            single_retained_track = []
            for (val, pos_passing) in zip(single_score_track,
                                          pos_in_passing_window):
                if (pos_passing > 0):
                    single_retained_track.append(val)
                    num_pos_vals += (1 if val > 0 else 0)
                    num_neg_vals += (1 if val < 0 else 0)
            retained_track_portions.append(single_retained_track)

        print("Fraction of positions retained:",
              sum(len(x) for x in retained_track_portions) /
              sum(len(x) for x in score_track))

        prob_pos = num_pos_vals / float(num_pos_vals + num_neg_vals)
        self.rng.seed(self.seed)
        null_tracks = []
        for i in range(self.num_seq_to_samp):
            # Pick a retained track at random and flip signs at the
            # empirical positive rate.
            random_track = retained_track_portions[
                int(self.rng.randint(0, len(retained_track_portions)))]
            track_with_sign_flips = np.array([
                abs(x) * (1 if self.rng.uniform() < prob_pos else -1)
                for x in random_track])
            if (self.shuffle_pos):
                self.rng.shuffle(track_with_sign_flips)
            null_tracks.append(track_with_sign_flips)
        return np.concatenate(window_sum_function(null_tracks), axis=0)
def get_null_vals(null_track, score_track, window_size,
                  original_summed_score_track):
    """Obtain null-distribution values.

    If *null_track* is callable (a GenerateNullDist-style object), delegate
    to it; otherwise treat it as a list of raw null tracks and return the
    concatenated window sums as a list.
    """
    if hasattr(null_track, '__call__'):
        # Generator object: it produces the null values itself.
        return null_track(
            score_track=score_track,
            window_size=window_size,
            original_summed_score_track=original_summed_score_track)
    window_sum_function = get_simple_window_sum_function(window_size)
    summed_null_track = window_sum_function(arrs=null_track)
    return list(np.concatenate(summed_null_track, axis=0))
def subsample_if_large(arr):
    """Subsample *arr* down to SUBSAMPLE_CAP entries, without replacement.

    A fixed RNG seed makes the subsample reproducible across runs; arrays
    at or below the cap are returned unchanged.
    """
    if len(arr) <= SUBSAMPLE_CAP:
        return arr
    print("Subsampling!")
    sys.stdout.flush()
    # fixed seed -> deterministic subsample
    return np.random.RandomState(1234).choice(
        a=arr, size=SUBSAMPLE_CAP, replace=False)
def irval_to_probpos(irval, frac_neg):
    """Convert isotonic-regression output a(x) into P(positive | x).

    Derivation sketch: let n(x) be the null (negative) pdf, p(x) the
    positive pdf, f_n the fraction of negatives and o(x) = f_n*n(x) +
    (1-f_n)*p(x) the observed pdf.  The isotonic fit approximates
    a(x) = o(x)/[o(x) + n(x)]; solving for the posterior probability of
    being a positive gives p_pos = 1 + f_n*(1 - 1/a(x)), which is then
    clipped to [0, 1].  (As f_n -> 1, p_pos -> 2 - 1/a(x).)
    """
    # 1e-7 floor guards against division by zero when a(x) is ~0
    posterior = 1 + frac_neg * (1 - (1 / np.maximum(irval, 1e-7)))
    return np.clip(posterior, 0.0, 1.0)
class SavableIsotonicRegression(object):
    """Isotonic-regression classifier separating foreground ("orig")
    scores from null scores, with hdf5 (de)serialization support.

    The fitted regression estimates a(x) = o(x)/[o(x)+n(x)] (see the
    derivation in irval_to_probpos); from its minimum value the fraction
    of negatives in the foreground data is inferred, and transform()
    produces calibrated P(positive) estimates.
    """
    def __init__(self, origvals, nullvals, increasing, min_frac_neg=0.95):
        # origvals: observed (foreground) scores; nullvals: null scores.
        # increasing: fit direction (True for the positive-score branch).
        # min_frac_neg: floor on the inferred negative fraction, applied
        #   to stay conservative when the data implies a smaller one.
        self.origvals = origvals
        self.nullvals = nullvals
        self.increasing = increasing
        self.min_frac_neg = min_frac_neg
        # Fit on orig (label 1.0) vs null (label 0.0); null samples are
        # re-weighted so total null weight equals total orig weight.
        self.ir = IsotonicRegression(out_of_bounds='clip',
            increasing=increasing).fit(
            X=np.concatenate([self.origvals, self.nullvals], axis=0),
            y=([1.0 for x in self.origvals] + [0.0 for x in self.nullvals]),
            sample_weight=([1.0 for x in self.origvals]
                           +[float(len(self.origvals))/len(self.nullvals)
                             for x in self.nullvals]))
        #Infer frac_pos based on the minimum value of the ir probs
        #See derivation in irval_to_probpos function
        # (the minimum precision occurs at the low end for an increasing
        #  fit and at the high end for a decreasing fit)
        min_prec_x = self.ir.X_min_ if self.increasing else self.ir.X_max_
        min_precision = self.ir.transform([min_prec_x])[0]
        implied_frac_neg = -1/(1-(1/max(min_precision,1e-7)))
        print("For increasing =",increasing,", the minimum IR precision was",
              min_precision,"occurring at",min_prec_x,
              "implying a frac_neg",
              "of",implied_frac_neg)
        if (implied_frac_neg > 1.0 or implied_frac_neg < self.min_frac_neg):
            implied_frac_neg = max(min(1.0,implied_frac_neg),
                                   self.min_frac_neg)
            print("To be conservative, adjusted frac neg is",implied_frac_neg)
        self.implied_frac_neg = implied_frac_neg

    def transform(self, vals):
        """Map raw scores to calibrated P(positive) estimates in [0, 1]."""
        return irval_to_probpos(self.ir.transform(vals),
                                frac_neg=self.implied_frac_neg)

    def save_hdf5(self, grp):
        """Write the constructor arguments to hdf5 group *grp*."""
        grp.attrs['increasing'] = self.increasing
        grp.attrs['min_frac_neg'] = self.min_frac_neg
        grp.create_dataset('origvals', data=self.origvals)
        grp.create_dataset('nullvals', data=self.nullvals)

    @classmethod
    def from_hdf5(cls, grp):
        """Reconstruct (and re-fit) from a group written by save_hdf5."""
        increasing = grp.attrs['increasing']
        min_frac_neg = grp.attrs['min_frac_neg']
        origvals = np.array(grp['origvals'])
        nullvals = np.array(grp['nullvals'])
        return cls(origvals=origvals, nullvals=nullvals,
                   increasing=increasing, min_frac_neg=min_frac_neg)
def get_isotonic_regression_classifier(orig_vals, null_vals):
    """Fit separate isotonic-regression calibrators for positive and
    negative scores.

    Returns (pos_ir, neg_ir, orig_vals, null_vals) where neg_ir is None
    when there are no negative foreground scores; the returned value
    arrays reflect any subsampling that was applied.
    """
    orig_vals = subsample_if_large(orig_vals)
    null_vals = subsample_if_large(null_vals)
    positives = np.array(sorted(x for x in orig_vals if x >= 0))
    # negatives ordered by magnitude, i.e. descending value, so the
    # decreasing isotonic fit sees them from weakest to strongest
    negatives = np.array(sorted((x for x in orig_vals if x < 0),
                                reverse=True))
    null_pos = [x for x in null_vals if x >= 0]
    null_neg = [x for x in null_vals if x < 0]
    pos_ir = SavableIsotonicRegression(
        origvals=positives, nullvals=null_pos, increasing=True)
    neg_ir = (SavableIsotonicRegression(
                  origvals=negatives, nullvals=null_neg, increasing=False)
              if len(negatives) > 0 else None)
    return pos_ir, neg_ir, orig_vals, null_vals
#sliding in this case would be a list of values
class VariableWindowAroundChunks(AbstractCoordProducer):
    """Coordinate producer that scans several sliding-window sizes.

    For each window size, summed importance scores are calibrated into
    foreground-precision estimates via isotonic regression against a
    null distribution; the per-position best precision across window
    sizes is then thresholded at the target FDR to identify seqlet
    coordinates.
    """
    count = 0  # gives saved diagnostic figures unique filenames

    def __init__(self, sliding, flank, suppress, target_fdr,
                       min_passing_windows_frac, max_passing_windows_frac,
                       separate_pos_neg_thresholds,
                       max_seqlets_total,
                       progress_update=5000,
                       verbose=True, plot_save_dir="figures"):
        """sliding: list of window sizes to try; flank: bases added on
        each side of a window; suppress: half-width of the exclusion zone
        around an accepted seqlet; target_fdr: FDR used to derive the
        precision threshold; min/max_passing_windows_frac: bounds on the
        fraction of windows allowed to pass; max_seqlets_total: optional
        cap on the number of returned coordinates."""
        self.sliding = sliding
        self.flank = flank
        self.suppress = suppress
        self.target_fdr = target_fdr
        assert max_passing_windows_frac >= min_passing_windows_frac
        self.min_passing_windows_frac = min_passing_windows_frac
        self.max_passing_windows_frac = max_passing_windows_frac
        self.separate_pos_neg_thresholds = separate_pos_neg_thresholds
        # Bugfix: this was previously hard-coded to None, which silently
        # discarded the caller's cap and broke the save_hdf5/from_hdf5
        # round-trip (the attribute could never be saved).
        self.max_seqlets_total = max_seqlets_total
        self.progress_update = progress_update
        self.verbose = verbose
        self.plot_save_dir = plot_save_dir

    @classmethod
    def from_hdf5(cls, grp):
        """Reconstruct from an hdf5 group written by save_hdf5."""
        sliding = np.array(grp["sliding"]).astype("int")
        flank = grp.attrs["flank"]
        suppress = grp.attrs["suppress"]
        target_fdr = grp.attrs["target_fdr"]
        min_passing_windows_frac = grp.attrs["min_passing_windows_frac"]
        max_passing_windows_frac = grp.attrs["max_passing_windows_frac"]
        separate_pos_neg_thresholds = grp.attrs["separate_pos_neg_thresholds"]
        if ("max_seqlets_total" in grp.attrs):
            max_seqlets_total = grp.attrs["max_seqlets_total"]
        else:
            max_seqlets_total = None
        progress_update = grp.attrs["progress_update"]
        verbose = grp.attrs["verbose"]
        return cls(sliding=sliding, flank=flank, suppress=suppress,
                    target_fdr=target_fdr,
                    min_passing_windows_frac=min_passing_windows_frac,
                    max_passing_windows_frac=max_passing_windows_frac,
                    separate_pos_neg_thresholds=separate_pos_neg_thresholds,
                    max_seqlets_total=max_seqlets_total,
                    progress_update=progress_update, verbose=verbose)

    def save_hdf5(self, grp):
        """Serialize the constructor arguments into hdf5 group *grp*."""
        grp.attrs["class"] = type(self).__name__
        grp.create_dataset("sliding", data=np.array(self.sliding))
        grp.attrs["flank"] = self.flank
        grp.attrs["suppress"] = self.suppress
        grp.attrs["target_fdr"] = self.target_fdr
        grp.attrs["min_passing_windows_frac"] = self.min_passing_windows_frac
        grp.attrs["max_passing_windows_frac"] = self.max_passing_windows_frac
        grp.attrs["separate_pos_neg_thresholds"] =\
            self.separate_pos_neg_thresholds
        if (self.max_seqlets_total is not None):
            grp.attrs["max_seqlets_total"] = self.max_seqlets_total
        grp.attrs["progress_update"] = self.progress_update
        grp.attrs["verbose"] = self.verbose

    def fit_pos_and_neg_irs(self, score_track, null_track):
        """Fit positive/negative isotonic-regression calibrators for each
        sliding-window size; returns (pos_irs, neg_irs), lists parallel
        to self.sliding."""
        pos_irs = []
        neg_irs = []
        for sliding_window_size in self.sliding:
            window_sum_function = get_simple_window_sum_function(
                                   sliding_window_size)
            print("Fitting - on window size",sliding_window_size)
            if (hasattr(null_track, '__call__')):
                # null_track is a generator; let it make the null values
                null_vals = null_track(
                    score_track=score_track,
                    window_size=sliding_window_size,
                    original_summed_score_track=None)
            else:
                null_summed_score_track = window_sum_function(arrs=null_track)
                null_vals = np.concatenate(null_summed_score_track,
                                           axis=0)
            print("Computing window sums")
            sys.stdout.flush()
            window_sums_rows = window_sum_function(arrs=score_track)
            print("Done computing window sums")
            sys.stdout.flush()
            orig_vals = np.concatenate(window_sums_rows, axis=0)
            # reuse orig_vals (previously the full track was concatenated
            # a second time for no benefit)
            pos_ir, neg_ir, subsampled_orig_vals, subsampled_null_vals =\
                get_isotonic_regression_classifier(
                    orig_vals=orig_vals,
                    null_vals=null_vals)
            make_nulldist_figure(orig_vals=subsampled_orig_vals,
                                 null_vals=subsampled_null_vals,
                                 pos_ir=pos_ir, neg_ir=neg_ir,
                                 pos_threshold=None,
                                 neg_threshold=None)
            util.show_or_savefig(plot_save_dir=self.plot_save_dir,
                                 filename="scoredist_window"
                                          +str(sliding_window_size)+"_"
                                          +str(VariableWindowAroundChunks.count)+".png")
            pos_irs.append(pos_ir)
            neg_irs.append(neg_ir)
        return pos_irs, neg_irs

    def __call__(self, score_track, null_track, tnt_results=None):
        """Produce seqlet coordinates from *score_track*.

        When *tnt_results* is None, thresholds and the precision
        transformer are fit from scratch (against *null_track*);
        otherwise the supplied transform-and-threshold results are
        reused.  Returns a CoordProducerResults.
        """
        if (tnt_results is None):
            pos_irs, neg_irs = self.fit_pos_and_neg_irs(
                score_track=score_track,
                null_track=null_track)
            precision_transformer = PrecisionValTransformer(
                    sliding_window_sizes=self.sliding,
                    pos_irs=pos_irs,
                    neg_irs=neg_irs)
            (precisiontransformed_score_track,
             precisiontransformed_bestwindowsizeidxs) =\
                precision_transformer.transform_score_track(
                    score_track=score_track)
            subsampled_prec_vals = subsample_if_large(
                np.concatenate(precisiontransformed_score_track, axis=0))
            from matplotlib import pyplot as plt
            plt.plot(sorted(subsampled_prec_vals),
                     (np.arange(len(subsampled_prec_vals))/
                      len(subsampled_prec_vals)))
            plt.xlabel("Tranformed IR precision value")
            plt.ylabel("CDF")
            util.show_or_savefig(plot_save_dir=self.plot_save_dir,
                                 filename="final_prec_vals_cdf_dist"
                                  +str(VariableWindowAroundChunks.count)+".png")
            #Pick a threshold according the the precisiontransformed score track
            pos_threshold = (1-self.target_fdr)
            neg_threshold = -(1-self.target_fdr)
            pos_threshold, neg_threshold =\
                refine_thresholds_based_on_frac_passing(
                  vals=subsampled_prec_vals,
                  pos_threshold=pos_threshold,
                  neg_threshold=neg_threshold,
                  min_passing_windows_frac=self.min_passing_windows_frac,
                  max_passing_windows_frac=self.max_passing_windows_frac,
                  separate_pos_neg_thresholds=self.separate_pos_neg_thresholds,
                  verbose=self.verbose)
            tnt_results = BasicTransformAndThresholdResults(
                            transformed_neg_threshold=neg_threshold,
                            transformed_pos_threshold=pos_threshold,
                            val_transformer=precision_transformer)
        else:
            precision_transformer = tnt_results.val_transformer
            (precisiontransformed_score_track,
             precisiontransformed_bestwindowsizeidxs) =\
                precision_transformer.transform_score_track(
                    score_track=score_track)
        #Need to remove padding because identify_coords is assumed to
        # operate on a scoretrack that has already been processed with
        # a sliding window of window_size (and assumes that partial windows
        # were not included)
        left_padding_to_remove = int((max(self.sliding)-1)/2)
        right_padding_to_remove = (max(self.sliding)-1)-left_padding_to_remove
        coords = identify_coords(
            score_track=[x[left_padding_to_remove:-right_padding_to_remove]
                         for x in precisiontransformed_score_track],
            pos_threshold=tnt_results.transformed_pos_threshold,
            neg_threshold=tnt_results.transformed_neg_threshold,
            window_size=max(self.sliding),
            flank=self.flank,
            suppress=self.suppress,
            max_seqlets_total=self.max_seqlets_total,
            verbose=self.verbose,
            other_info_tracks={'best_window_idx':
                [x[left_padding_to_remove:-right_padding_to_remove] for x in
                 precisiontransformed_bestwindowsizeidxs]})

        VariableWindowAroundChunks.count += 1

        return CoordProducerResults(
                    coords=coords,
                    tnt_results=tnt_results)
#identify_coords is expecting something that has already been processed
# with sliding windows of size window_size
def identify_coords(score_track, pos_threshold, neg_threshold,
                    window_size, flank, suppress,
                    max_seqlets_total, verbose, other_info_tracks=None):
    """Greedily pick seqlet coordinates from windowed score tracks.

    Expects *score_track* to have ALREADY been processed with a sliding
    window of *window_size* (no partial windows).  Repeatedly takes the
    highest-magnitude position that passes a threshold, records a
    coordinate expanded by *flank* on each side, then suppresses
    positions within +-*suppress* of it.  Positions within *flank* of
    either edge are never selected.

    other_info_tracks: optional dict of name -> per-example tracks (same
    shapes as score_track) whose values at the chosen position are
    attached to each coordinate.  Returns a list of SeqletCoordsFWAP,
    optionally truncated to the top *max_seqlets_total* by |score|.
    """
    # Bugfix: avoid a mutable default argument (shared dict across calls)
    if other_info_tracks is None:
        other_info_tracks = {}
    for other_info_track in other_info_tracks.values():
        assert all([x.shape==y.shape for x,y
                    in zip(other_info_track,score_track)])
    #cp_score_track = 'copy' of the score track, which can be modified as
    # coordinates are identified
    cp_score_track = [np.array(x) for x in score_track]
    #if a position is less than the threshold, set it to -np.inf
    #Note that the threshold comparisons need to be >= and not just > for
    # cases where there are lots of ties at the high end (e.g. with an IR
    # tranformation that gives a lot of values that have a precision of 1.0)
    cp_score_track = [
        np.array([np.abs(y) if (y >= pos_threshold
                                or y <= neg_threshold)
                   else -np.inf for y in x])
        for x in cp_score_track]

    coords = []
    for example_idx,single_score_track in enumerate(cp_score_track):
        #set the stuff near the flanks to -np.inf so that we
        # don't pick it up during argmax
        single_score_track[0:flank] = -np.inf
        single_score_track[len(single_score_track)-(flank):
                           len(single_score_track)] = -np.inf
        while True:
            argmax = np.argmax(single_score_track,axis=0)
            max_val = single_score_track[argmax]

            #bail if exhausted everything that passed the threshold
            #and was not suppressed
            if (max_val == -np.inf):
                break

            #need to be able to expand without going off the edge
            if ((argmax >= flank) and
                (argmax < (len(single_score_track)-flank))):

                coord = SeqletCoordsFWAP(
                    example_idx=example_idx,
                    start=argmax-flank,
                    end=argmax+window_size+flank,
                    score=score_track[example_idx][argmax],
                    other_info = dict([
                     (track_name, track[example_idx][argmax])
                     for (track_name, track) in other_info_tracks.items()]))
                assert (coord.score >= pos_threshold
                        or coord.score <= neg_threshold)
                coords.append(coord)
            else:
                assert False,\
                 ("This shouldn't happen because I set stuff near the"
                  "border to -np.inf early on")
            #suppress the chunks within +- suppress
            left_supp_idx = int(max(np.floor(argmax+0.5-suppress),0))
            right_supp_idx = int(min(np.ceil(argmax+0.5+suppress),
                                 len(single_score_track)))
            single_score_track[left_supp_idx:right_supp_idx] = -np.inf

    if (verbose):
        print("Got "+str(len(coords))+" coords")
        sys.stdout.flush()
    if ((max_seqlets_total is not None) and
        len(coords) > max_seqlets_total):
        if (verbose):
            print("Limiting to top "+str(max_seqlets_total))
            sys.stdout.flush()
        coords = sorted(coords, key=lambda x: -np.abs(x.score))\
                       [:max_seqlets_total]
    return coords
def refine_thresholds_based_on_frac_passing(
    vals, pos_threshold, neg_threshold,
    min_passing_windows_frac, max_passing_windows_frac,
    separate_pos_neg_thresholds, verbose):
    """Clamp FDR-derived thresholds so the fraction of windows passing
    them stays within [min_passing_windows_frac, max_passing_windows_frac].

    When the passing fraction is out of bounds, thresholds are recomputed
    as percentiles of *vals* (of the positive and negative values
    separately when separate_pos_neg_thresholds is set, otherwise of
    |vals| with a symmetric negative threshold).  Returns the possibly
    adjusted (pos_threshold, neg_threshold).
    """
    frac_passing_windows =(
        sum(vals >= pos_threshold)
        + sum(vals <= neg_threshold))/float(len(vals))
    if (verbose):
        print("Thresholds from null dist were",
              neg_threshold," and ",pos_threshold,
              "with frac passing", frac_passing_windows)

    # empty sides fall back to [0] so the percentile call is well-defined
    pos_vals = [x for x in vals if x >= 0] or [0]
    neg_vals = [x for x in vals if x < 0] or [0]

    def _thresholds_at(frac):
        # Recompute thresholds so that (roughly) `frac` of windows pass.
        if separate_pos_neg_thresholds:
            return (np.percentile(a=pos_vals, q=100*(1-frac)),
                    np.percentile(a=neg_vals, q=100*frac))
        pos = np.percentile(a=np.abs(vals), q=100*(1-frac))
        return (pos, -pos)

    if (frac_passing_windows < min_passing_windows_frac):
        if (verbose):
            print("Passing windows frac was",
                  frac_passing_windows,", which is below ",
                  min_passing_windows_frac,"; adjusting")
        pos_threshold, neg_threshold = _thresholds_at(
                                           min_passing_windows_frac)
    if (frac_passing_windows > max_passing_windows_frac):
        if (verbose):
            print("Passing windows frac was",
                  frac_passing_windows,", which is above ",
                  max_passing_windows_frac,"; adjusting")
        pos_threshold, neg_threshold = _thresholds_at(
                                           max_passing_windows_frac)
    if (verbose):
        print("New thresholds are",pos_threshold,"and",neg_threshold)
    return pos_threshold, neg_threshold
def make_nulldist_figure(orig_vals, null_vals, pos_ir, neg_ir,
                         pos_threshold, neg_threshold):
    """Plot foreground vs. null score histograms overlaid with the
    estimated precision curve, plus optional red threshold markers.

    The figure is left on the current matplotlib figure for the caller
    to show or save.
    """
    from matplotlib import pyplot as plt
    fig, density_ax = plt.subplots()
    sorted_vals = np.array(sorted(orig_vals))
    # overlaid densities: blue = foreground, orange = null
    density_ax.hist(sorted_vals, bins=100, density=True, alpha=0.5)
    density_ax.hist(null_vals, bins=100, density=True, alpha=0.5)
    density_ax.set_ylabel("Probability density\n(blue=foreground, orange=null)")
    density_ax.set_xlabel("Total importance in window")
    # precision is the elementwise max of the pos/neg calibrators
    precisions = pos_ir.transform(sorted_vals)
    if neg_ir is not None:
        precisions = np.maximum(precisions, neg_ir.transform(sorted_vals))
    precision_ax = density_ax.twinx()
    precision_ax.plot(sorted_vals, precisions)
    for threshold in (pos_threshold, neg_threshold):
        if threshold is not None:
            precision_ax.plot([threshold, threshold], [0.0, 1.0],
                              color="red")
    precision_ax.set_ylabel("Estimated foreground precision")
    precision_ax.set_ylim(0.0, 1.02)
class FixedWindowAroundChunks(AbstractCoordProducer):
    """Coordinate producer using a single fixed sliding-window size.

    Windowed importance sums are compared against a null distribution via
    isotonic regression to pick FDR-controlled thresholds, which are then
    clamped to the configured passing-window-fraction bounds before
    coordinates are identified.
    """
    count = 0  # gives saved diagnostic figures unique filenames

    def __init__(self, sliding,
                       flank,
                       suppress, #flanks to suppress
                       target_fdr,
                       min_passing_windows_frac,
                       max_passing_windows_frac,
                       separate_pos_neg_thresholds=False,
                       max_seqlets_total=None,
                       progress_update=5000,
                       verbose=True,
                       plot_save_dir="figures"):
        """sliding: the window size; flank: bases added on each side of a
        window; suppress: half-width of the exclusion zone around an
        accepted seqlet; target_fdr: FDR used for thresholding;
        max_seqlets_total: optional cap on returned coordinates."""
        self.sliding = sliding
        self.flank = flank
        self.suppress = suppress
        self.target_fdr = target_fdr
        assert max_passing_windows_frac >= min_passing_windows_frac
        self.min_passing_windows_frac = min_passing_windows_frac
        self.max_passing_windows_frac = max_passing_windows_frac
        self.separate_pos_neg_thresholds = separate_pos_neg_thresholds
        # Bugfix: this was previously hard-coded to None, which silently
        # discarded the caller's cap and broke the save_hdf5/from_hdf5
        # round-trip (the attribute could never be saved).
        self.max_seqlets_total = max_seqlets_total
        self.progress_update = progress_update
        self.verbose = verbose
        self.plot_save_dir = plot_save_dir

    @classmethod
    def from_hdf5(cls, grp):
        """Reconstruct from an hdf5 group written by save_hdf5."""
        sliding = grp.attrs["sliding"]
        flank = grp.attrs["flank"]
        suppress = grp.attrs["suppress"]
        target_fdr = grp.attrs["target_fdr"]
        min_passing_windows_frac = grp.attrs["min_passing_windows_frac"]
        max_passing_windows_frac = grp.attrs["max_passing_windows_frac"]
        separate_pos_neg_thresholds = grp.attrs["separate_pos_neg_thresholds"]
        if ("max_seqlets_total" in grp.attrs):
            max_seqlets_total = grp.attrs["max_seqlets_total"]
        else:
            max_seqlets_total = None
        progress_update = grp.attrs["progress_update"]
        verbose = grp.attrs["verbose"]
        return cls(sliding=sliding, flank=flank, suppress=suppress,
                    target_fdr=target_fdr,
                    min_passing_windows_frac=min_passing_windows_frac,
                    max_passing_windows_frac=max_passing_windows_frac,
                    separate_pos_neg_thresholds=separate_pos_neg_thresholds,
                    max_seqlets_total=max_seqlets_total,
                    progress_update=progress_update, verbose=verbose)

    def save_hdf5(self, grp):
        """Serialize the constructor arguments into hdf5 group *grp*."""
        grp.attrs["class"] = type(self).__name__
        grp.attrs["sliding"] = self.sliding
        grp.attrs["flank"] = self.flank
        grp.attrs["suppress"] = self.suppress
        grp.attrs["target_fdr"] = self.target_fdr
        grp.attrs["min_passing_windows_frac"] = self.min_passing_windows_frac
        grp.attrs["max_passing_windows_frac"] = self.max_passing_windows_frac
        grp.attrs["separate_pos_neg_thresholds"] =\
            self.separate_pos_neg_thresholds
        if (self.max_seqlets_total is not None):
            grp.attrs["max_seqlets_total"] = self.max_seqlets_total
        grp.attrs["progress_update"] = self.progress_update
        grp.attrs["verbose"] = self.verbose

    def __call__(self, score_track, null_track, tnt_results=None):
        """Produce seqlet coordinates from *score_track*.

        When *tnt_results* is None, thresholds and the value transformer
        are fit from scratch against *null_track*; otherwise the supplied
        transform-and-threshold results are reused.  Returns a
        CoordProducerResults.
        """
        # score_track now can be a list of arrays,
        assert all([len(x.shape)==1 for x in score_track])
        window_sum_function = get_simple_window_sum_function(self.sliding)

        if (self.verbose):
            print("Computing windowed sums on original")
            sys.stdout.flush()
        original_summed_score_track = window_sum_function(arrs=score_track)

        #Determine the window thresholds
        if (tnt_results is None):
            if (self.verbose):
                print("Generating null dist")
                sys.stdout.flush()
            null_vals = get_null_vals(
                null_track=null_track,
                score_track=score_track,
                window_size=self.sliding,
                original_summed_score_track=original_summed_score_track)

            if (self.verbose):
                print("Computing threshold")
                sys.stdout.flush()
            orig_vals = list(
                np.concatenate(original_summed_score_track, axis=0))
            # Note: only the returned subsampled_* arrays may be
            # subsampled; orig_vals itself is left untouched and is used
            # in full for the value transformer below.
            pos_ir, neg_ir, subsampled_orig_vals, subsampled_null_vals =\
                get_isotonic_regression_classifier(
                    orig_vals=orig_vals,
                    null_vals=null_vals)

            subsampled_pos_orig_vals = (
                np.array(sorted([x for x in subsampled_orig_vals if x >= 0])))
            subsampled_neg_orig_vals = (
                np.array(sorted([x for x in subsampled_orig_vals if x < 0],
                         key=lambda x: abs(x))))
            subsampled_pos_val_precisions =\
                pos_ir.transform(subsampled_pos_orig_vals)
            if (len(subsampled_neg_orig_vals) > 0):
                subsampled_neg_val_precisions =\
                    neg_ir.transform(subsampled_neg_orig_vals)

            # threshold = smallest value whose estimated precision reaches
            # (1 - target_fdr); falls back to the extreme value if none do
            pos_threshold = ([x[1] for x in
             zip(subsampled_pos_val_precisions,
                 subsampled_pos_orig_vals) if x[0]
              >= (1-self.target_fdr)]+[subsampled_pos_orig_vals[-1]])[0]
            if (len(subsampled_neg_orig_vals) > 0):
                neg_threshold = ([x[1] for x in
                 zip(subsampled_neg_val_precisions,
                     subsampled_neg_orig_vals) if x[0]
                  >= (1-self.target_fdr)]+[subsampled_neg_orig_vals[-1]])[0]
            else:
                neg_threshold = -np.inf

            pos_threshold, neg_threshold =\
                refine_thresholds_based_on_frac_passing(
                  vals=subsampled_orig_vals,
                  pos_threshold=pos_threshold,
                  neg_threshold=neg_threshold,
                  min_passing_windows_frac=self.min_passing_windows_frac,
                  max_passing_windows_frac=self.max_passing_windows_frac,
                  separate_pos_neg_thresholds=self.separate_pos_neg_thresholds,
                  verbose=self.verbose)

            if (self.separate_pos_neg_thresholds):
                val_transformer = SignedPercentileValTransformer(
                    distribution=orig_vals)
            else:
                val_transformer = AbsPercentileValTransformer(
                    distribution=orig_vals)

            if (self.verbose):
                print("Final raw thresholds are",
                      neg_threshold," and ",pos_threshold)
                print("Final transformed thresholds are",
                      val_transformer(neg_threshold)," and ",
                      val_transformer(pos_threshold))

            make_nulldist_figure(orig_vals=subsampled_orig_vals,
                                 null_vals=subsampled_null_vals,
                                 pos_ir=pos_ir, neg_ir=neg_ir,
                                 pos_threshold=pos_threshold,
                                 neg_threshold=neg_threshold)
            util.show_or_savefig(plot_save_dir=self.plot_save_dir,
                                 filename="scoredist_"
                                          +str(FixedWindowAroundChunks.count)+".png")
            FixedWindowAroundChunks.count += 1

            tnt_results = FWACTransformAndThresholdResults(
                            neg_threshold=neg_threshold,
                            transformed_neg_threshold=val_transformer(neg_threshold),
                            pos_threshold=pos_threshold,
                            transformed_pos_threshold=val_transformer(pos_threshold),
                            val_transformer=val_transformer)

        coords = identify_coords(
            score_track=original_summed_score_track,
            pos_threshold=tnt_results.pos_threshold,
            neg_threshold=tnt_results.neg_threshold,
            window_size=self.sliding,
            flank=self.flank,
            suppress=self.suppress,
            max_seqlets_total=self.max_seqlets_total,
            verbose=self.verbose)

        return CoordProducerResults(
                    coords=coords,
                    tnt_results=tnt_results)
| StarcoderdataPython |
43733 | from src.objects.Track import Track
from src.usesful_func import start_pygame_headless
# Run pygame without a display so these tests work in headless CI.
start_pygame_headless()
# Shared track fixture used by the car tests below.
track = Track("tracks/tiny.tra")
def test_car_human():
    """A human-controlled car can be constructed on the shared track."""
    from src.cars.CarHuman import CarHuman
    car = CarHuman(track)
    assert car
def test_car_ai():
    """An AI car can be constructed from a saved neural-net model file."""
    from src.cars.CarAI import CarAI
    from src.objects.NeuralNet import NeuralNet
    nn = NeuralNet.from_path("models/raw/cnn_light.net")
    car = CarAI(track=track, neural_net=nn)
    assert car
assert car
| StarcoderdataPython |
3208567 | <gh_stars>0
import ab
print(ab.a) | StarcoderdataPython |
179883 | #!/usr/bin/env python
from PartsManager import PartsManager
from MachinePNP import Machine
from getchar import getchar
import time, os, pickle, sys, math
# coordinate systems
# G53 machine
# G54 camera
# G55 paste
# G56 place
# G57 part tape 0
# Prefer the legacy BeautifulSoup 3 module; fall back to BeautifulSoup 4
# (bs4) under the same name if it is not installed.
try:
    import BeautifulSoup
except ImportError:
    # was a bare `except:`, which would also swallow KeyboardInterrupt
    # and unrelated errors raised during import
    import bs4 as BeautifulSoup
class Placer:
    """Interactive pick-and-place controller (Python 2).

    Reads part locations from an Eagle .brd file (via PartsManager),
    drives the PnP machine (Machine) with G-code, and offers a
    keyboard-driven terminal UI for solder-paste dispensing, part
    placement and inspection.  Settings persist in config.placer.p.
    """
    def __init__(self,brd):
        """Load part data from *brd*, connect to the machine and restore
        (or create) the persisted configuration."""
        self.pm = PartsManager(brd)
        self.m = Machine()
        self.statusTimeout = 0
        self.pmTimeout = 0
        self.running = True
        self.screenClear()
        if os.path.exists("config.placer.p"):
            self.configRecall()
        else:
            # first run: build the default configuration
            self.config = {}
            self.config['partHeightMax'] = 10.0
            self.config['placedPartHeight'] = 0.5
            self.config['statusInterval'] = 0.25
            self.config['pmInterval'] = 5.0
            self.config['solderMoveInPartHeight'] = 3.0
            self.config['tapeOffset'] = 4.0
            self.config['tapeCount'] = 0
            self.config['handPlaceLocation'] = (140,50)
            self.config['jogHeight'] = 13.0
        fnt = open(brd).read()
        # lxml parser if available, otherwise BeautifulSoup's default
        try:
            self.soup = BeautifulSoup.BeautifulSoup(fnt,"lxml")
        except:
            self.soup = BeautifulSoup.BeautifulSoup(fnt)
        self.dimensionFind()
        self.configSave()
        self.screenClear()

    def partList(self):
        """Prompt for a part value or name; return the list of matching
        part names (empty if nothing matches)."""
        self.screenPos(1,1)
        self.screenClear('eol')
        ans = raw_input("part? ")
        list = []
        if ans:
            if self.pm.values.has_key(ans):
                list = self.pm.values[ans]
            else:
                ans = ans.upper() # its a part
                if self.pm.parts.has_key(ans):
                    list = [ans] # user gave us a part name, not a value
        self.screenPos(1,3)
        self.screenClear('eol')
        return list

    def dimensionFind(self):
        """Scan layer '20' wires (board outline in Eagle files) to find
        the board's maximum x/y extent; stored in the config."""
        xmax = 0
        ymax = 0
        for wire in self.soup.findAll('wire',{'layer':'20'}):
            x = int(wire['x1'])
            if x > xmax:
                xmax = x
            y = int(wire['y1'])
            if y > ymax:
                ymax = y
        self.config['xmax'] = xmax
        self.config['ymax'] = ymax

    def machineStatus(self):
        """Redraw the machine status panel."""
        self.screenPos(1,5)
        print "Machine"
        self.m.statusPrint()

    def partsStatus(self):
        """Redraw the remaining-parts panel."""
        self.screenPos(1,9)
        self.screenClear('eos')
        print "Parts"
        self.pm.statusPrint()

    def loop(self):
        """One iteration of the main loop: service the machine, handle a
        key press if any, and refresh the status panels on their timers."""
        self.m.loop()
        c = getchar()
        if c:
            self.handler(c)
        if time.time() > self.statusTimeout:
            self.machineStatus()
            self.statusTimeout = time.time() + self.config['statusInterval'];
        if time.time() > self.pmTimeout:
            self.partsStatus()
            self.pmTimeout = time.time() + self.config['pmInterval'];

    def configSave(self):
        """Persist the configuration dict to disk."""
        pickle.dump(self.config,open("config.placer.p","wb"))

    def configRecall(self):
        """Restore the configuration dict from disk."""
        self.config = pickle.load(open("config.placer.p", "rb" ))

    def info(self):
        """Print diagnostic info from the parts manager and the machine."""
        self.pm.info()
        self.m.info()

    def screenClear(self,what='screen'):
        '''
        erase functions:
        what: screen  => erase screen and go home
              line => erase line and go to start of line
              bos => erase to begin of screen
              eos => erase to end of screen
              bol => erase to begin of line
              eol => erase to end of line
        '''
        # ANSI escape sequences keyed by the erase mode
        clear = {
            'screen': '\x1b[2J\x1b[H',
            'line': '\x1b[2K\x1b[G',
            'bos': '\x1b[1J',
            'eos': '\x1b[J',
            'bol': '\x1b[1K',
            'eol': '\x1b[K',
        }
        sys.stdout.write(clear[what])
        sys.stdout.flush()

    def screenPos(self,x,y):
        """Move the terminal cursor to column x, row y (ANSI escape)."""
        sys.stdout.write('\x1b[%d;%dH'%(y,x))
        sys.stdout.flush()

    def setup(self):
        """Exercise the machine by jogging between the board's corners."""
        self.m.send("G0 G90")
        for i in range(5):
            self.m.send("X0Y0")
            #self.m.send("G4P1")
            self.m.send("X%dY%d"%(self.config['xmax'],self.config['ymax']))
            #self.m.send("G4P1")
            self.m.send("X%dY0"%self.config['xmax'])
            #self.m.send("G4P1")
            self.m.send("X0Y%d"%self.config['ymax'])
            #self.m.send("G4P1")
            #self.m.send("X0Y0")
            #self.m.send("G4P1")
        self.m.send("X0Y0")

    def handler(self,c):
        """Dispatch a single key press to the corresponding action."""
        if c == "q":
            self.running = False
        # jog key bindings, currently disabled:
        '''
        if c == "i":
            self.m.send("G0 G91 Y0.1")
        if c == "k":
            self.m.send("G0 G91 Y-0.1")
        if c == "j":
            self.m.send("G0 G91 X-0.1")
        if c == "l":
            self.m.send("G0 G91 X0.1")
        if c == "I":
            self.m.send("G0 G91 Y1.0")
        if c == "K":
            self.m.send("G0 G91 Y-1.0")
        if c == "J":
            self.m.send("G0 G91 X-1.0")
        if c == "L":
            self.m.send("G0 G91 X1.0")
        if c == "d":
            self.m.send("G0 G91 Z-0.1")
        if c == "e":
            self.m.send("G0 G91 Z0.1")
        if c == "D":
            self.m.send("G0 G91 Z-1.0")
        if c == "E":
            self.m.send("G0 G91 Z1.0")
        '''
        if c == "E":
            self.m.send("G0 G91 Z1.0")
        if c == "H":
            self.home()
        if c == "?":
            self.screenClear()
        if c == "g":
            self.partInspect()
        if c == "s":
            self.partSolder()
        if c == "p":
            self.partPlace()
        if c == "v":
            self.m.vacuumToggle()
        if c == "R":
            self.partRemove()
        if c == "X":
            self.setup()
        if c == "C":
            self.calibate()

    def calibate(self):
        """Compute axis calibration factors from a measured x,y distance.

        NOTE(review): method name is a typo for "calibrate"; it is only
        referenced internally (handler 'C'), so renaming would be safe
        but is left as-is here.
        """
        xlen = 188.00
        ylen = 88.00
        cal = 36.576
        self.screenPos(1,1)
        self.screenClear('line')
        ans = raw_input("calibrate x,y? ")
        if ans:
            x,y = ans.split(',')
            x = (xlen/float(x))*cal
            y = (ylen/float(y))*cal
            print "x %.8f\ny %.8f"%(x,y)

    def partSolder(self):
        """Dispense solder paste on every pad of the selected parts,
        visiting parts in nearest-neighbor order from the current
        machine position."""
        list = self.partList()
        if list:
            self.m.mdi()
            tempParts = []
            for pname in list:
                tempParts.append(self.pm.parts[pname])
            # greedy nearest-neighbor ordering starting from current x,y
            partCur = {'x':self.m.x,'y':self.m.y}
            partGroup = []
            partNearest = None
            while len(tempParts):
                dmin = 10000
                for p in tempParts:
                    d = math.sqrt(math.pow(partCur['x']-p['x'],2) + math.pow(partCur['y']-p['y'],2))
                    if d < dmin:
                        partNearest = p
                        dmin = d
                partGroup.append(partNearest)
                tempParts.remove(partNearest)
                partCur = partNearest
            self.m.send("G0 G90")
            for part in partGroup:
                pads = part['pads']
                #self.m.send("G0 G90 Z%-0.4f"%(self.m.config['solderzoffset']+self.config['partHeightMax']))
                for pad in pads:
                    x,y = pad
                    self.screenPos(1,1)
                    self.screenClear('eol')
                    print "solder paste for %4s at %0.2f,%0.2f"%(part['name'],x,y)
                    self.m.send("X%0.4f Y%0.4f"%(x,y))
                    self.m.pressureOn()
                    #self.m.send("G0 G90 Z%-0.4f"%self.m.config['solderzoffset'])
                    self.m.send("G4 P0.1") # dwell
                    self.m.pressureOff()
                    #self.m.send("G0 G90 Z%-0.4f"%(self.m.config['solderzoffset']+self.config['solderMoveInPartHeight']))
            time.sleep(2)
            #self.m.send("G0 G90 Z%0.2f"%self.config['partHeightMax'])
        #self.screenPos(1,3)
        #self.screenClear('eos')
        self.pmTimeout = 0 # forces refresh

    def partRemove(self):
        """Prompt for a part name and drop it from the parts manager."""
        self.screenPos(1,3)
        ans = raw_input("remove part? ")
        if len(ans):
            self.pm.partDelete(ans)
        self.screenPos(1,3)
        self.screenClear('eos')
        self.pmTimeout = 0 # forces refresh

    def partPlace(self):
        """Pick the selected parts from tape (G57 coordinate system) and
        place them at their board positions (G54)."""
        list = self.partList()
        if list:
            self.m.mdi()
            self.config['tapeCount'] = 0
            self.configSave()
            self.m.send("G0 G90")
            for pname in list:
                self.screenPos(1,1)
                self.screenClear('eol')
                part = self.pm.parts[pname]
                x = part['x']
                y = part['y']
                r = part['rotation']
                print "placing %4s at %0.2f,%0.2f@%0.1f"%(pname,x,y,r)
                #self.m.send("Z%0.2f"%self.config['partHeightMax'])
                # pick position advances along the tape by tapeOffset
                self.m.send("G57 X%0.4f Y0.00"%(self.config['tapeCount']*self.config['tapeOffset']))
                #self.m.send("Z%0.4f"%self.m.config['pv0zoffset'])
                self.m.vacuumOn();
                self.m.send("G4 P1.0") #dwell
                #self.m.send("Z%0.2f"%self.config['partHeightMax'])
                self.m.send("G54 X%0.4f Y%0.4f"%(x,y))
                self.config['tapeCount'] += 1
                self.configSave()
                #self.m.send("G0 G90 Z%0.2f"%self.config['placedPartHeight'])
                self.m.vacuumOff();
                self.m.send("G4 P1.0")
                #self.m.send("G0 G90 Z%0.2f"%self.config['partHeightMax'])
                self.m.waitForIdle()
                self.pm.partDelete(pname)
                # NOTE(review): pmInfo is not defined on this class — this
                # line will raise AttributeError when reached; probably
                # self.pm.info() or self.partsStatus() was intended.
                self.pmInfo()
            #self.m.send("G0 G90 Z%0.2f"%self.config['partHeightMax'])
        self.pmTimeout = 0 # forces refresh

    def partInspect(self):
        """Move the camera (G54) over each selected part for inspection."""
        list = self.partList()
        if list:
            self.m.mdi()
            for pname in list:
                self.screenPos(1,1)
                self.screenClear('eol')
                print "inspect %s"%pname
                self.m.send("G54 G0 G90"); # pick camera coordinate
                self.m.send("Z%0.2f"%self.config['jogHeight']) #move to safe height
                part = self.pm.parts[pname]
                self.m.send("X%0.2f Y%0.2f"%(part['x'],part['y']))
                self.m.send("G4 P1.5") #dwell

    def offsetSet(self):
        """Prompt for which machine offset to capture at the current
        position (Home / vacuum / head / solder / manual)."""
        self.screenPos(1,3)
        ans = raw_input("SET offset H,v,h,s,m? ")
        if ans == 'v':
            self.m.pv0OffsetSet()
        if ans == 'h':
            self.m.ph0OffsetSet()
        if ans == 's':
            self.m.solderOffsetSet()
        if ans == 'H':
            self.m.homeSet()
        if ans == 'm':
            self.m.manualOffsetSet()
        self.screenPos(1,3)
        self.screenClear('line')

    def go(self):
        """Prompt for a stored offset/location and move the machine there."""
        self.screenPos(1,3)
        ans = raw_input("go offset H,v,h,s,p,m,M? ")
        if ans == 'v':
            self.m.pv0OffsetGo(self.config['partHeightMax'])
        if ans == 'h':
            self.m.ph0OffsetGo(self.config['partHeightMax'])
        if ans == 's':
            self.screenPos(1,3)
            ans = raw_input("place tool removed y? ")
            if ans == "y":
                self.m.solderOffsetGo(self.config['partHeightMax'])
        if ans == 'H':
            self.m.homeGo()
        if ans == 'p':
            self.m.send("G0 G90 Z%0.2f"%self.config['partHeightMax'])
            self.m.send("G0 G90 X%0.2f Y%0.2f"%self.config['handPlaceLocation'])
        if ans == 'm':
            self.m.manualOffsetGo(self.config['partHeightMax'])
        if ans == 'M':
            self.moveToPart()
        self.screenPos(1,3)
        self.screenClear('line')

    def home(self):
        """Interactively home individual axes until the user presses
        enter on an empty prompt."""
        self.m.manual()
        while True:
            self.screenPos(1,1)
            self.screenClear('line')
            ans = raw_input("home x,y,z,a? ")
            if ans:
                if ans == 'x':
                    self.m.command.home(0)
                if ans == 'y':
                    self.m.command.home(1)
                if ans == 'z':
                    self.m.command.home(2)
                if ans == 'a':
                    self.m.command.home(3)
            else:
                break
        self.screenPos(1,1)
        self.screenClear('line')
if __name__ == "__main__":
    # Uncomment to reset the saved placer configuration on startup.
    #os.system("rm -f config.placer.p")
    p = Placer('a.brd')
    # Main event loop: poll machine + keyboard until the user quits ('q').
    while p.running:
        p.loop()
| StarcoderdataPython |
3295789 | <filename>tests/test_latentdistributiontest.py
# <NAME>
# bvarjavand [at] jhu.edu
# 02.26.2019
import unittest
import numpy as np
from graspy.inference import LatentDistributionTest
from graspy.simulations import er_np, sbm
class TestLatentDistributionTest(unittest.TestCase):
    """Unit tests for graspy's LatentDistributionTest two-sample test."""
    @classmethod
    def setUpClass(cls):
        # Two independent ER graphs with the same parameters, shared by
        # the simple fit tests below.
        np.random.seed(123456)
        cls.A1 = er_np(20, 0.3)
        cls.A2 = er_np(20, 0.3)

    def test_fit_p_ase_works(self):
        """fit() runs end-to-end and returns a p-value."""
        npt = LatentDistributionTest()
        p = npt.fit(self.A1, self.A2)

    def test_bad_kwargs(self):
        """Invalid constructor arguments raise the documented errors."""
        with self.assertRaises(ValueError):
            LatentDistributionTest(n_components=-100)
        with self.assertRaises(ValueError):
            LatentDistributionTest(n_bootstraps=-100)
        with self.assertRaises(TypeError):
            LatentDistributionTest(n_bootstraps=0.5)
        with self.assertRaises(TypeError):
            LatentDistributionTest(n_components=0.5)
        with self.assertRaises(TypeError):
            LatentDistributionTest(bandwidth="oops")

    def test_n_bootstraps(self):
        """The null distribution has one entry per bootstrap."""
        npt = LatentDistributionTest(n_bootstraps=234, n_components=None)
        npt.fit(self.A1, self.A2)
        self.assertEqual(npt.null_distribution_.shape[0], 234)

    def test_bad_matrix_inputs(self):
        """Non-graph inputs are rejected."""
        npt = LatentDistributionTest()

        bad_matrix = [[1, 2]]
        with self.assertRaises(TypeError):
            npt.fit(bad_matrix, self.A2)

    def test_directed_inputs(self):
        """Directed graphs are not yet supported."""
        np.random.seed(2)
        A = er_np(100, 0.3, directed=True)
        B = er_np(100, 0.3, directed=True)

        npt = LatentDistributionTest()
        with self.assertRaises(NotImplementedError):
            npt.fit(A, B)

    def test_different_sizes(self):
        """Graphs with different node counts are rejected."""
        np.random.seed(3)
        A = er_np(50, 0.3)
        B = er_np(100, 0.3)

        npt = LatentDistributionTest()
        with self.assertRaises(ValueError):
            npt.fit(A, B)

    def test_SBM_epsilon(self):
        """The test fails to reject for same-distribution SBMs and
        rejects for SBMs with different block probabilities."""
        np.random.seed(12345678)
        B1 = np.array([[0.5, 0.2], [0.2, 0.5]])
        B2 = np.array([[0.7, 0.2], [0.2, 0.7]])
        b_size = 200
        A1 = sbm(2 * [b_size], B1)
        A2 = sbm(2 * [b_size], B1)
        A3 = sbm(2 * [b_size], B2)

        npt_null = LatentDistributionTest(n_components=2, n_bootstraps=100)
        npt_alt = LatentDistributionTest(n_components=2, n_bootstraps=100)
        p_null = npt_null.fit(A1, A2)
        p_alt = npt_alt.fit(A1, A3)
        self.assertTrue(p_null > 0.05)
        self.assertTrue(p_alt <= 0.05)
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| StarcoderdataPython |
1773199 | # Generated by Django 4.0.3 on 2022-04-05 09:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds two comment-related site settings.

    ``comment_posting`` toggles acceptance of new comments; ``show_comments``
    toggles display of existing ones (disabling display also disables
    posting, per the help text). Both default to enabled.
    """
    dependencies = [
        ('home', '0010_sitesettings_moderator_reply_name_and_more'),
    ]
    operations = [
        migrations.AddField(
            model_name='sitesettings',
            name='comment_posting',
            field=models.BooleanField(default=True, help_text='Allow users to submit new comments', verbose_name='Allow comment posting'),
        ),
        migrations.AddField(
            model_name='sitesettings',
            name='show_comments',
            field=models.BooleanField(default=True, help_text='Show existing user comments, if disabled, this will disable new comment submission.', verbose_name='Show existing comments'),
        ),
    ]
| StarcoderdataPython |
77736 | """Реализация разделов сайта для работы с пользователями."""
import os
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import current_user, login_required, login_user, logout_user
from werkzeug.urls import url_parse
from webapp.account.models import Account
from webapp.account.schemas import AccountSchema
from webapp.db import db
from webapp.item.schemas import DescriptionSchema, ItemSchema
from webapp.user.forms import LoginForm, RegistrationForm
from webapp.user.models import User
from webapp.user.schemas import UserSchema
blueprint = Blueprint('user', __name__, url_prefix='/users')
@blueprint.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and start a session (login page)."""
    title = 'Авторизация'
    # Already signed in: go straight to the user's profile page.
    if current_user.is_authenticated:
        return redirect(url_for(
            'user.profile',
            username=current_user.username,
        ))
    form = LoginForm()
    if form.validate_on_submit():
        user = db.session.query(User).filter_by(
            username=form.username.data,
        ).first()
        # Same message for unknown user and wrong password, so the form
        # does not leak which usernames exist.
        if user is None or not user.check_password(form.password.data):
            flash('Invalid username or password', 'danger')
            return redirect(url_for('user.login'))
        login_user(user, remember=form.remember_me.data)
        # Honour a same-site ?next= redirect only; external URLs (non-empty
        # netloc) fall back to the profile page to prevent open redirects.
        next_page = request.args.get('next')
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('user.profile',
                                username=form.username.data)
        return redirect(next_page)
    template_path = os.path.join('user', 'login.html')
    return render_template(
        template_path,
        title=title,
        form=form,
    )
@blueprint.route('/logout')
def logout():
    """End the current user's session and redirect to the login page."""
    logout_user()
    return redirect(url_for('user.login'))
@blueprint.route('/register', methods=['GET', 'POST'])
def register():
    """Register a new user account."""
    title = 'Регистрация'
    # Authenticated users cannot re-register; send them to the login view.
    if current_user.is_authenticated:
        return redirect(url_for('user.login'))
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(username=form.username.data, email=form.email.data)
        # Store only the password hash, never the plain password.
        user.set_password(form.password.data)
        db.session.add(user)
        db.session.commit()
        flash('You are now a registered user!', 'success')
        return redirect(url_for('user.login'))
    template_path = os.path.join('user', 'register.html')
    return render_template(
        template_path,
        title=title,
        form=form,
    )
@blueprint.route('/<username>')
@login_required
def profile(username):
    """Show the profile page of a registered user, with their accounts."""
    # 404 for unknown usernames instead of rendering an empty page.
    user = db.session.query(User).filter_by(username=username).first_or_404()
    accounts = user.accounts.all()
    template_path = os.path.join('user', 'profile.html')
    return render_template(
        template_path,
        user=user,
        accounts=accounts,
    )
| StarcoderdataPython |
3233956 | <gh_stars>1-10
##############################################################################
# cpuspinner.py
##############################################################################
# Class used to keep the game looping. Generates a tick event each time
# the game loops.
##############################################################################
# 06/12 - Flembobs
##############################################################################
from systemevents import *
import pygame
class CPUSpinner(SystemEventListener):
    """Drives the main loop: posts one TickEvent per frame until a QuitEvent arrives."""

    def __init__(self, fps):
        SystemEventListener.__init__(self)
        # Target frame rate for the loop.
        self.fps = fps
        # The loop keeps going while this flag is set; cleared on QuitEvent.
        self.running = True
        # pygame clock used to throttle the loop to ``fps``.
        self.clock = pygame.time.Clock()

    def run(self):
        """Spin until ``running`` is cleared, emitting a TickEvent each frame."""
        while self.running:
            self.clock.tick(self.fps)
            SystemEventManager.post(TickEvent())

    def notify(self, event):
        """Stop the loop as soon as a QuitEvent is observed."""
        if isinstance(event, QuitEvent):
            self.running = False
3215765 | <reponame>ViliamVadocz/AutoLeague2
import shutil
from typing import Mapping, Tuple, Optional
from rlbot.parsing.bot_config_bundle import BotConfigBundle
from rlbot.setup_manager import setup_manager_context
from rlbot.training.training import Fail
from rlbottraining.exercise_runner import run_playlist, RenderPolicy
from bots import BotID
from match import MatchDetails, MatchResult
from match_exercise import MatchExercise, MatchGrader
from overlay import make_overlay
from paths import LeagueDir
from replays import ReplayPreference, ReplayMonitor, ReplayData
from popup import confirm_match_result
def run_match(ld: LeagueDir, match_details: MatchDetails, bots: Mapping[BotID, BotConfigBundle],
              replay_preference: ReplayPreference) -> Tuple[MatchResult, Optional[ReplayData]]:
    """
    Run a match, wait for it to finish, and return the result.

    :param ld: league directory, used for overlay data and replay storage
    :param match_details: participants and settings of the match to run
    :param bots: mapping from bot id to its config bundle
    :param replay_preference: whether/how replays should be saved
    :return: the (possibly manually corrected) match result and the replay
        data if one was captured, otherwise ``None``
    """
    with setup_manager_context() as setup_manager:
        # Expose data to overlay
        make_overlay(ld, match_details, bots)
        # Prepare the match exercise
        print(f"Starting match: {match_details.blue} vs {match_details.orange}. Waiting for match to finish...")
        match = MatchExercise(
            name=match_details.name,
            match_config=match_details.to_config(bots),
            grader=MatchGrader(
                replay_monitor=ReplayMonitor(replay_preference=replay_preference),
            )
        )
        # If any bots have signed up for early start, give them 10 seconds.
        # This is typically enough for Scratch.
        setup_manager.early_start_seconds = 10
        # For loop, but should only run exactly once
        for exercise_result in run_playlist([match], setup_manager=setup_manager,
                                            render_policy=RenderPolicy.DEFAULT):
            replay_data = None
            # Warn if no replay was found
            replay_data = exercise_result.exercise.grader.replay_monitor.replay_data()
            if isinstance(exercise_result.grade, Fail) and replay_data.replay_id is None:
                print(f"WARNING: No replay was found for the match '{match_details.name}'.")
            else:
                # Copy the captured replay into the league's replay folder.
                if replay_preference != ReplayPreference.NONE and replay_data.replay_path is not None:
                    try:
                        dst = ld.replays / f"{replay_data.replay_id}.replay"
                        shutil.copy(replay_data.replay_path, dst)
                        print("Replay successfully copied to replays directory")
                    except:
                        # NOTE(review): bare except silently swallows everything
                        # (including KeyboardInterrupt). Copying is best-effort,
                        # but narrowing to OSError would be safer — confirm intent.
                        pass
            # Let a human confirm (or correct) the automatically graded result.
            match_result = confirm_match_result(exercise_result.exercise.grader.match_result)
            return match_result, replay_data
| StarcoderdataPython |
1704688 | <reponame>OP2/PyOP2<filename>pyop2/types/dataset.py
import numbers
import numpy as np
from petsc4py import PETSc
from pyop2 import (
caching,
datatypes as dtypes,
exceptions as ex,
mpi,
utils
)
from pyop2.types.set import ExtrudedSet, GlobalSet, MixedSet, Set, Subset
class DataSet(caching.ObjectCached):
    """PyOP2 Data Set
    Set used in the op2.Dat structures to specify the dimension of the data.
    """
    @utils.validate_type(('iter_set', Set, ex.SetTypeError),
                         ('dim', (numbers.Integral, tuple, list), ex.DimTypeError),
                         ('name', str, ex.NameTypeError))
    def __init__(self, iter_set, dim=1, name=None):
        """:param iter_set: the :class:`Set` this data is defined on
        :param dim: shape of the values per set element (scalar or tuple)
        :param name: optional user-visible name
        """
        if isinstance(iter_set, ExtrudedSet):
            raise NotImplementedError("Not allowed!")
        # ObjectCached may hand back an already-constructed instance;
        # skip re-initialisation in that case.
        if self._initialized:
            return
        if isinstance(iter_set, Subset):
            raise NotImplementedError("Deriving a DataSet from a Subset is unsupported")
        self._set = iter_set
        self._dim = utils.as_tuple(dim, numbers.Integral)
        # Flattened number of values per set element (product of dim).
        self._cdim = np.prod(self._dim).item()
        self._name = name or "dset_#x%x" % id(self)
        self._initialized = True

    @classmethod
    def _process_args(cls, *args, **kwargs):
        # First element is the cache "owner" (the Set); args passed through.
        return (args[0], ) + args, kwargs

    @classmethod
    def _cache_key(cls, iter_set, dim=1, name=None):
        # The name is deliberately excluded: DataSets differing only by
        # name share one cached instance.
        return (iter_set, utils.as_tuple(dim, numbers.Integral))

    @utils.cached_property
    def _wrapper_cache_key_(self):
        return (type(self), self.dim, self._set._wrapper_cache_key_)

    def __getstate__(self):
        """Extract state to pickle."""
        return self.__dict__

    def __setstate__(self, d):
        """Restore from pickled state."""
        self.__dict__.update(d)

    # Look up any unspecified attributes on the _set.
    def __getattr__(self, name):
        """Returns a Set specific attribute."""
        value = getattr(self.set, name)
        # Cache the value on this instance so __getattr__ is not hit again.
        setattr(self, name, value)
        return value

    def __getitem__(self, idx):
        """Allow index to return self"""
        assert idx == 0
        return self

    @utils.cached_property
    def dim(self):
        """The shape tuple of the values for each element of the set."""
        return self._dim

    @utils.cached_property
    def cdim(self):
        """The scalar number of values for each member of the set. This is
        the product of the dim tuple."""
        return self._cdim

    @utils.cached_property
    def name(self):
        """Returns the name of the data set."""
        return self._name

    @utils.cached_property
    def set(self):
        """Returns the parent set of the data set."""
        return self._set

    def __iter__(self):
        """Yield self when iterated over."""
        yield self

    def __len__(self):
        """This is not a mixed type and therefore of length 1."""
        return 1

    def __str__(self):
        return "OP2 DataSet: %s on set %s, with dim %s" % \
            (self._name, self._set, self._dim)

    def __repr__(self):
        return "DataSet(%r, %r, %r)" % (self._set, self._dim, self._name)

    def __contains__(self, dat):
        """Indicate whether a given Dat is compatible with this DataSet."""
        return dat.dataset == self

    @utils.cached_property
    def lgmap(self):
        """A PETSc LGMap mapping process-local indices to global
        indices for this :class:`DataSet`.
        """
        lgmap = PETSc.LGMap()
        if self.comm.size == 1:
            # Serial: the identity map suffices.
            lgmap.create(indices=np.arange(self.size, dtype=dtypes.IntType),
                         bsize=self.cdim, comm=self.comm)
        else:
            # Parallel: numbering comes from the halo exchange data.
            lgmap.create(indices=self.halo.local_to_global_numbering,
                         bsize=self.cdim, comm=self.comm)
        return lgmap

    @utils.cached_property
    def scalar_lgmap(self):
        """The ``lgmap`` with a block size of 1 (block indices)."""
        if self.cdim == 1:
            return self.lgmap
        indices = self.lgmap.block_indices
        return PETSc.LGMap().create(indices=indices, bsize=1, comm=self.comm)

    @utils.cached_property
    def unblocked_lgmap(self):
        """A PETSc LGMap mapping process-local indices to global
        indices for this :class:`DataSet` with a block size of 1.
        """
        if self.cdim == 1:
            return self.lgmap
        else:
            indices = self.lgmap.indices
            lgmap = PETSc.LGMap().create(indices=indices,
                                         bsize=1, comm=self.lgmap.comm)
            return lgmap

    @utils.cached_property
    def field_ises(self):
        """A list of PETSc ISes defining the global indices for each set in
        the DataSet.
        Used when extracting blocks from matrices for solvers."""
        ises = []
        nlocal_rows = 0
        for dset in self:
            nlocal_rows += dset.size * dset.cdim
        # Exclusive prefix sum: offset of this rank's first global row.
        offset = self.comm.scan(nlocal_rows)
        offset -= nlocal_rows
        for dset in self:
            nrows = dset.size * dset.cdim
            iset = PETSc.IS().createStride(nrows, first=offset, step=1,
                                           comm=self.comm)
            iset.setBlockSize(dset.cdim)
            ises.append(iset)
            offset += nrows
        return tuple(ises)

    @utils.cached_property
    def local_ises(self):
        """A list of PETSc ISes defining the local indices for each set in the DataSet.
        Used when extracting blocks from matrices for assembly."""
        ises = []
        start = 0
        for dset in self:
            bs = dset.cdim
            n = dset.total_size*bs
            iset = PETSc.IS().createStride(n, first=start, step=1,
                                           comm=mpi.COMM_SELF)
            iset.setBlockSize(bs)
            start += n
            ises.append(iset)
        return tuple(ises)

    @utils.cached_property
    def layout_vec(self):
        """A PETSc Vec compatible with the dof layout of this DataSet."""
        vec = PETSc.Vec().create(comm=self.comm)
        # Local size is fixed; PETSc determines the global size (None).
        size = (self.size * self.cdim, None)
        vec.setSizes(size, bsize=self.cdim)
        vec.setUp()
        return vec

    @utils.cached_property
    def dm(self):
        """A PETSc DMShell exposing this DataSet's layout to solvers."""
        dm = PETSc.DMShell().create(comm=self.comm)
        dm.setGlobalVector(self.layout_vec)
        return dm
class GlobalDataSet(DataSet):
    """A proxy :class:`DataSet` for use in a :class:`Sparsity` where the
    matrix has :class:`Global` rows or columns."""
    def __init__(self, global_):
        """
        :param global_: The :class:`Global` on which this object is based."""
        self._global = global_
        self._globalset = GlobalSet(comm=self.comm)
        self._name = "gdset_#x%x" % id(self)

    @classmethod
    def _cache_key(cls, *args):
        # Returning None disables caching for this class.
        return None

    @utils.cached_property
    def dim(self):
        """The shape tuple of the values for each element of the set."""
        return self._global._dim

    @utils.cached_property
    def cdim(self):
        """The scalar number of values for each member of the set. This is
        the product of the dim tuple."""
        return self._global._cdim

    @utils.cached_property
    def name(self):
        """Returns the name of the data set."""
        return self._global._name

    @utils.cached_property
    def comm(self):
        """Return the communicator on which the set is defined."""
        return self._global.comm

    @utils.cached_property
    def set(self):
        """Returns the parent set of the data set."""
        return self._globalset

    @utils.cached_property
    def size(self):
        """The number of local entries in the Dataset (1 on rank 0)"""
        # NOTE(review): ``mpi.MPI.comm.rank`` looks wrong — mpi4py's MPI module
        # has no ``comm`` attribute; presumably ``self.comm.rank`` was intended.
        # Confirm before relying on this property.
        return 1 if mpi.MPI.comm.rank == 0 else 0

    def __iter__(self):
        """Yield self when iterated over."""
        yield self

    def __len__(self):
        """This is not a mixed type and therefore of length 1."""
        return 1

    def __str__(self):
        return "OP2 GlobalDataSet: %s on Global %s" % \
            (self._name, self._global)

    def __repr__(self):
        return "GlobalDataSet(%r)" % (self._global)

    @utils.cached_property
    def lgmap(self):
        """A PETSc LGMap mapping process-local indices to global
        indices for this :class:`DataSet`.
        """
        # A Global has exactly one (block of) entries, so the map is trivial.
        lgmap = PETSc.LGMap()
        lgmap.create(indices=np.arange(1, dtype=dtypes.IntType),
                     bsize=self.cdim, comm=self.comm)
        return lgmap

    @utils.cached_property
    def unblocked_lgmap(self):
        """A PETSc LGMap mapping process-local indices to global
        indices for this :class:`DataSet` with a block size of 1.
        """
        if self.cdim == 1:
            return self.lgmap
        else:
            indices = self.lgmap.indices
            lgmap = PETSc.LGMap().create(indices=indices,
                                         bsize=1, comm=self.lgmap.comm)
            return lgmap

    @utils.cached_property
    def field_ises(self):
        """A list of PETSc ISes defining the global indices for each set in
        the DataSet.
        Used when extracting blocks from matrices for solvers."""
        ises = []
        nlocal_rows = 0
        for dset in self:
            nlocal_rows += dset.size * dset.cdim
        # Exclusive prefix sum: offset of this rank's first global row.
        offset = self.comm.scan(nlocal_rows)
        offset -= nlocal_rows
        for dset in self:
            nrows = dset.size * dset.cdim
            iset = PETSc.IS().createStride(nrows, first=offset, step=1,
                                           comm=self.comm)
            iset.setBlockSize(dset.cdim)
            ises.append(iset)
            offset += nrows
        return tuple(ises)

    @utils.cached_property
    def local_ises(self):
        """A list of PETSc ISes defining the local indices for each set in the DataSet.
        Used when extracting blocks from matrices for assembly."""
        # Not meaningful for a Global; deliberately unimplemented.
        raise NotImplementedError

    @utils.cached_property
    def layout_vec(self):
        """A PETSc Vec compatible with the dof layout of this DataSet."""
        vec = PETSc.Vec().create(comm=self.comm)
        size = (self.size * self.cdim, None)
        vec.setSizes(size, bsize=self.cdim)
        vec.setUp()
        return vec

    @utils.cached_property
    def dm(self):
        """A PETSc DMShell exposing this DataSet's layout to solvers."""
        dm = PETSc.DMShell().create(comm=self.comm)
        dm.setGlobalVector(self.layout_vec)
        return dm
class MixedDataSet(DataSet):
    r"""A container for a bag of :class:`DataSet`\s.
    Initialized either from a :class:`MixedSet` and an iterable or iterator of
    ``dims`` of corresponding length ::
        mdset = op2.MixedDataSet(mset, [dim1, ..., dimN])
    or from a tuple of :class:`Set`\s and an iterable of ``dims`` of
    corresponding length ::
        mdset = op2.MixedDataSet([set1, ..., setN], [dim1, ..., dimN])
    If all ``dims`` are to be the same, they can also be given as an
    :class:`int` for either of above invocations ::
        mdset = op2.MixedDataSet(mset, dim)
        mdset = op2.MixedDataSet([set1, ..., setN], dim)
    Initialized from a :class:`MixedSet` without explicitly specifying ``dims``
    they default to 1 ::
        mdset = op2.MixedDataSet(mset)
    Initialized from an iterable or iterator of :class:`DataSet`\s and/or
    :class:`Set`\s, where :class:`Set`\s are implicitly upcast to
    :class:`DataSet`\s of dim 1 ::
        mdset = op2.MixedDataSet([dset1, ..., dsetN])
    """
    def __init__(self, arg, dims=None):
        r"""
        :param arg: a :class:`MixedSet` or an iterable or a generator
                    expression of :class:`Set`\s or :class:`DataSet`\s or a
                    mixture of both
        :param dims: `None` (the default) or an :class:`int` or an iterable or
                     generator expression of :class:`int`\s, which **must** be
                     of same length as `arg`
        .. Warning ::
            When using generator expressions for ``arg`` or ``dims``, these
            **must** terminate or else will cause an infinite loop.
        """
        # ObjectCached may hand back an already-constructed instance;
        # skip re-initialisation in that case. _process_args has already
        # normalised ``arg`` into a tuple of DataSets.
        if self._initialized:
            return
        self._dsets = arg
        self._initialized = True

    @classmethod
    def _process_args(cls, arg, dims=None):
        # If the second argument is not None it is expected to be a scalar dim
        # or an iterable of dims and the first is expected to be a MixedSet or
        # an iterable of Sets
        if dims is not None:
            # If arg is a MixedSet, get its Sets tuple
            sets = arg.split if isinstance(arg, MixedSet) else tuple(arg)
            # If dims is a scalar, turn it into a tuple of right length
            dims = (dims,) * len(sets) if isinstance(dims, int) else tuple(dims)
            if len(sets) != len(dims):
                raise ValueError("Got MixedSet of %d Sets but %s dims" %
                                 (len(sets), len(dims)))
            # Set ** dim constructs a DataSet of that dim.
            dsets = tuple(s ** d for s, d in zip(sets, dims))
        # Otherwise expect the first argument to be an iterable of Sets and/or
        # DataSets and upcast Sets to DataSets as necessary
        else:
            arg = [s if isinstance(s, DataSet) else s ** 1 for s in arg]
            dsets = utils.as_tuple(arg, type=DataSet)
        return (dsets[0].set, ) + (dsets, ), {}

    @classmethod
    def _cache_key(cls, arg, dims=None):
        # The normalised tuple of DataSets fully identifies the instance.
        return arg

    @utils.cached_property
    def _wrapper_cache_key_(self):
        raise NotImplementedError

    def __getitem__(self, idx):
        """Return :class:`DataSet` with index ``idx`` or a given slice of datasets."""
        return self._dsets[idx]

    @utils.cached_property
    def split(self):
        r"""The underlying tuple of :class:`DataSet`\s."""
        return self._dsets

    @utils.cached_property
    def dim(self):
        """The shape tuple of the values for each element of the sets."""
        return tuple(s.dim for s in self._dsets)

    @utils.cached_property
    def cdim(self):
        """The sum of the scalar number of values for each member of the sets.
        This is the sum of products of the dim tuples."""
        return sum(s.cdim for s in self._dsets)

    @utils.cached_property
    def name(self):
        """Returns the name of the data sets."""
        return tuple(s.name for s in self._dsets)

    @utils.cached_property
    def set(self):
        """Returns the :class:`MixedSet` this :class:`MixedDataSet` is
        defined on."""
        return MixedSet(s.set for s in self._dsets)

    def __iter__(self):
        r"""Yield all :class:`DataSet`\s when iterated over."""
        for ds in self._dsets:
            yield ds

    def __len__(self):
        """Return number of contained :class:`DataSet`s."""
        return len(self._dsets)

    def __str__(self):
        return "OP2 MixedDataSet composed of DataSets: %s" % (self._dsets,)

    def __repr__(self):
        return "MixedDataSet(%r)" % (self._dsets,)

    @utils.cached_property
    def layout_vec(self):
        """A PETSc Vec compatible with the dof layout of this MixedDataSet."""
        vec = PETSc.Vec().create(comm=self.comm)
        # Compute local and global size from sizes of layout vecs
        lsize, gsize = map(sum, zip(*(d.layout_vec.sizes for d in self)))
        vec.setSizes((lsize, gsize), bsize=1)
        vec.setUp()
        return vec

    @utils.cached_property
    def lgmap(self):
        """A PETSc LGMap mapping process-local indices to global
        indices for this :class:`MixedDataSet`.
        """
        lgmap = PETSc.LGMap()
        if self.comm.size == 1:
            # Serial: a single identity map over all fields suffices.
            size = sum(s.size * s.cdim for s in self)
            lgmap.create(indices=np.arange(size, dtype=dtypes.IntType),
                         bsize=1, comm=self.comm)
            return lgmap
        # Compute local to global maps for a monolithic mixed system
        # from the individual local to global maps for each field.
        # Exposition:
        #
        # We have N fields and P processes.  The global row
        # ordering is:
        #
        # f_0_p_0, f_1_p_0, ..., f_N_p_0; f_0_p_1, ..., ; f_0_p_P,
        # ..., f_N_p_P.
        #
        # We have per-field local to global numberings, to convert
        # these into multi-field local to global numberings, we note
        # the following:
        #
        # For each entry in the per-field l2g map, we first determine
        # the rank that entry belongs to, call this r.
        #
        # We know that this must be offset by:
        # 1. The sum of all field lengths with rank < r
        # 2. The sum of all lower-numbered field lengths on rank r.
        #
        # Finally, we need to shift the field-local entry by the
        # current field offset.
        idx_size = sum(s.total_size*s.cdim for s in self)
        indices = np.full(idx_size, -1, dtype=dtypes.IntType)
        owned_sz = np.array([sum(s.size * s.cdim for s in self)],
                            dtype=dtypes.IntType)
        field_offset = np.empty_like(owned_sz)
        self.comm.Scan(owned_sz, field_offset)
        field_offset -= owned_sz
        all_field_offsets = np.empty(self.comm.size, dtype=dtypes.IntType)
        self.comm.Allgather(field_offset, all_field_offsets)
        start = 0
        all_local_offsets = np.zeros(self.comm.size, dtype=dtypes.IntType)
        current_offsets = np.zeros(self.comm.size + 1, dtype=dtypes.IntType)
        for s in self:
            idx = indices[start:start + s.total_size * s.cdim]
            owned_sz[0] = s.size * s.cdim
            self.comm.Scan(owned_sz, field_offset)
            self.comm.Allgather(field_offset, current_offsets[1:])
            # Find the ranks each entry in the l2g belongs to
            l2g = s.unblocked_lgmap.indices
            tmp_indices = np.searchsorted(current_offsets, l2g, side="right") - 1
            idx[:] = l2g[:] - current_offsets[tmp_indices] + \
                all_field_offsets[tmp_indices] + all_local_offsets[tmp_indices]
            self.comm.Allgather(owned_sz, current_offsets[1:])
            all_local_offsets += current_offsets[1:]
            start += s.total_size * s.cdim
        lgmap.create(indices=indices, bsize=1, comm=self.comm)
        return lgmap

    @utils.cached_property
    def unblocked_lgmap(self):
        """A PETSc LGMap mapping process-local indices to global
        indices for this :class:`DataSet` with a block size of 1.
        """
        # The mixed lgmap is already built with block size 1.
        return self.lgmap
| StarcoderdataPython |
3390974 | import numpy
from .AssemblyFlaw import AssemblyFlaw
class Assembly:
    """Base class to represent assemblies. See GibsonAssembly, BASICAssembly,
    etc. for usage classes

    Parameters
    ----------
    parts
      List of part names corresponding to part records in a repository
    name
      Name of the assembly as it will appear in reports.
    max_constructs
      None or a number of maximum assemblies to compute (avoids complete
      freeze for combinatorial assemblies with extremely many possibilities).
    expected_constructs
      Either a number or a string ``'any_number'``. If the number of constructs
      doesn't match this value, the assembly will be considered invalid in
      reports and summaries
    connectors_collection
      Name of a collection in the repository from which to get candidates for
      connector autocompletion.
    dependencies
      (do not use). Metadata indicating which assemblies depend on this
      assembly, or are depended on by it.
    """

    # Subclasses list here the cell prefixes ("param: value") they accept
    # when importing assemblies from a spreadsheet row.
    spreadsheet_import_parameters = ()

    def __init__(
        self,
        parts,
        name="unnamed_assembly",
        max_constructs=40,
        dependencies=None,
        expected_constructs=1,
        connectors_collection=None,
    ):
        self.name = name
        self.parts = parts
        self.max_constructs = max_constructs
        if dependencies is None:
            # Fresh dict per instance (never a shared mutable default).
            dependencies = dict(level=1, depends_on=[], used_in=[])
        self.dependencies = dependencies
        self.expected_constructs = expected_constructs
        self.connectors_collection = connectors_collection

    def get_extra_construct_data(self):
        """Return extra per-construct metadata (subclasses may override)."""
        return dict()

    @staticmethod
    def _row_nonempty_cells(row):
        """Return row cells as strings, dropping empty/placeholder cells."""
        empty_cells_contain = ["-", "nan", "None", "_", ""]
        return [str(e) for e in row if str(e) not in empty_cells_contain]

    @classmethod
    def from_dataframe_row(cls, row):
        """This class indicates how a particular assembly can be built from a
        spreadsheet row"""
        line = cls._row_nonempty_cells(row)
        assembly_name = line[0]
        parts, parameters = [], dict()
        for cell in line[1:]:
            # Cells of the form "param: value" set constructor parameters;
            # every other cell is a part name.
            for param in cls.spreadsheet_import_parameters:
                prefix = param + ": "
                if cell.startswith(prefix):
                    parameters[param] = format_string(cell[len(prefix):])
                    break
            else:
                parts.append(cell)
        return cls(name=assembly_name, parts=parts, **parameters)

    def attribute_ids_to_constructs(self, construct_records):
        """Defines how constructs are named, in particular in the context of
        combinatorial assemblies.

        A single construct is named after the assembly; multiple constructs
        get zero-padded suffixes ``name_01, name_02, ...``.
        """
        n_records = len(construct_records)
        if n_records == 0:
            return
        if n_records == 1:
            construct_records[0].id = self.name
        else:
            # Pad to the width of the largest index so all ids align.
            # (Fixes the previous ceil(log10(n - 1)) computation, which
            # under-padded, e.g. 1 digit for 10 constructs and 0 for 2.)
            digits = len(str(n_records))
            for i, record in enumerate(construct_records):
                record.id = "{name}_{num:0{digits}}".format(
                    num=i + 1, digits=digits, name=self.name
                )

    def _get_connectors_records(self, sequence_repository):
        """Gets connectors records from a sequence repository"""
        collection = self.connectors_collection
        if collection is None:
            return []
        else:
            return list(sequence_repository.collections[collection].values())

    def _detect_constructs_number_error(self, found, flaws_list):
        """Add a new flaw to the list if unexpected constructs_number"""
        expected = self.expected_constructs
        if (expected != 'any_number') and (expected != found):
            flaw = AssemblyFlaw(
                assembly=self,
                message="Wrong number of constructs",
                suggestion="Check assembly or parts design",
                data={"expected_": expected, "found": found},
            )
            flaws_list.append(flaw)

    def _detect_max_constructs_reached(self, found, flaws_list):
        """Add a new flaw to the list if max constructs is reached"""
        max_allowed = self.max_constructs
        if (max_allowed is not None) and (max_allowed <= found):
            message = "Max construct number reached, there may be been more!"
            flaw = AssemblyFlaw(
                assembly=self,
                message=message,
                suggestion="Check assembly or parts design",
                data={"max": max_allowed, "found": found},
            )
            flaws_list.append(flaw)
def format_string(value):
    """Coerce a spreadsheet cell string into a bool, int, float, or str.

    ``"true"``/``"false"`` (any case) become booleans, numeric strings become
    an int (when integral) or a float, and anything else is returned
    unchanged.
    """
    if value.lower() == "false":
        return False
    if value.lower() == "true":
        return True
    try:
        value = float(value)
        if int(value) == value:
            value = int(value)
    except ValueError:
        # Not numeric (or float('nan'), whose int() raises): keep value as-is.
        pass
    except OverflowError:
        # "inf"/"-inf": int(float('inf')) overflows; keep the float value.
        # (Previously this exception escaped and crashed the import.)
        pass
    return value
| StarcoderdataPython |
3266870 | import sys
# look in ../ BEFORE trying to import Algorithmia. If you append to the
# you will load the version installed on the computer.
sys.path = ['../'] + sys.path
import unittest, os, uuid
import Algorithmia
from Algorithmia.datafile import DataFile, LocalDataFile
class DataFileTest(unittest.TestCase):
    """Integration tests for remote DataFile objects (``data://`` URIs).

    These tests talk to the live Algorithmia API and therefore require valid
    credentials to be available to ``Algorithmia.client()``.
    """
    def setUp(self):
        self.client = Algorithmia.client()
    def test_get_nonexistant(self):
        """Downloading a file that does not exist must fail."""
        df = self.client.file('data://.my/nonexistant/nonreal')
        try:
            df.getFile()
            retrieved_file = True
        except Exception as e:
            retrieved_file = False
        self.assertFalse(retrieved_file)
    def test_get_str(self):
        """Reading a nonexistent file as a string must fail."""
        df = self.client.file('data://.my/nonexistant/nonreal')
        try:
            print(df.getString())
            retrieved_file = True
        except Exception as e:
            retrieved_file = False
        self.assertFalse(retrieved_file)
    def test_set_attributes(self):
        """set_attributes accepts a metadata dict without raising."""
        df = DataFile(self.client, 'data://.my/empty')
        try:
            df.set_attributes({
                'last_modified': '2019-01-09T22:44:31.632Z',
                'size': 0
            })
        except Exception as e:
            self.fail("set_attributes failed with exception: " + str(e))
class LocalFileTest(unittest.TestCase):
    """Tests for LocalDataFile, which maps ``file://`` URIs to the local filesystem."""
    DUMMY_TEXT = 'this file gets populated during testing'
    EXISTING_TEXT = 'this file exists before testing'

    @staticmethod
    def _local_path(uri):
        """Strip the ``file://`` scheme so the URI can be used with os/open."""
        return uri.replace('file://', '', 1)

    def setUp(self):
        self.client = Algorithmia.client()
        # Make a file that DOES exist and has contents.
        self.EXISTING_FILE = 'file://'+str(uuid.uuid1())+'.txt'
        with open(self._local_path(self.EXISTING_FILE), 'w') as f:
            f.write(self.EXISTING_TEXT)
        # We need a dummy file that doesn't currently exist.
        self.DUMMY_FILE = 'file://'+str(uuid.uuid1())+'.txt'
        # Bug fix: the scheme prefix must be stripped before filesystem
        # checks, otherwise a stale file is never detected or removed.
        dummy_path = self._local_path(self.DUMMY_FILE)
        if os.path.isfile(dummy_path):
            os.remove(dummy_path)

    def tearDown(self):
        os.remove(self._local_path(self.EXISTING_FILE))
        dummy_path = self._local_path(self.DUMMY_FILE)
        # Bug fix: previously the existence check used the prefixed URI and
        # so never fired, leaking the dummy file after each test run.
        if os.path.isfile(dummy_path):
            os.remove(dummy_path)

    def test_local_remote(self):
        """``file://`` URIs yield LocalDataFile; ``data://`` URIs yield DataFile."""
        self.assertTrue(isinstance(self.client.file(self.DUMMY_FILE), LocalDataFile))
        self.assertTrue(isinstance(self.client.file('data://foo'), DataFile))

    def test_exists_or_not(self):
        """exists() reflects the actual filesystem state."""
        self.assertTrue(self.client.file(self.EXISTING_FILE).exists())
        self.assertFalse(self.client.file(self.DUMMY_FILE).exists())

    def test_get_nonexistant(self):
        """Reading a file that does not exist must fail."""
        df = self.client.file(self.DUMMY_FILE)
        try:
            df.getFile()
            retrieved_file = True
        except Exception:
            retrieved_file = False
        self.assertFalse(retrieved_file)

    def test_put_and_read_and_delete(self):
        """Round-trip: put() writes, getString() reads back, delete() removes."""
        f = self.client.file(self.DUMMY_FILE)
        f.put(self.DUMMY_TEXT)
        # Check getString
        txt = self.client.file(self.DUMMY_FILE).getString()
        self.assertEqual(txt, self.DUMMY_TEXT)
        # Check delete
        deletion_status = self.client.file(self.DUMMY_FILE).delete()
        self.assertTrue(deletion_status)

    def test_read_types(self):
        """getBytes() and getFile() both return the file's contents."""
        # Check getBytes
        txt = self.client.file(self.EXISTING_FILE).getBytes().decode('utf-8')
        self.assertEqual(txt, self.EXISTING_TEXT)
        # Check getFile
        txt = self.client.file(self.EXISTING_FILE).getFile().read()
        self.assertEqual(txt, self.EXISTING_TEXT)
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| StarcoderdataPython |
4808189 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 9 20:57:29 2012
@author: garrett
@email: <EMAIL>
original pygmail from:
https://github.com/vinod85/pygmail/blob/master/pygmail.py
"""
import imaplib, smtplib
import re
from email.mime.text import MIMEText
class pygmail(object):
    """Minimal Gmail client wrapping IMAP (read/manage) and SMTP (send) over SSL."""
    IMAP_SERVER = 'imap.gmail.com'
    IMAP_PORT = 993
    SMTP_SERVER = 'smtp.gmail.com'
    SMTP_PORT = 465

    def __init__(self):
        # IMAP connection; created by login().
        self.M = None
        # Payload of the most recent IMAP response.
        self.response = None
        # Cached mailbox names; filled by get_mailboxes().
        self.mailboxes = []

    def login(self, username, password):
        """Open SSL IMAP and SMTP sessions and authenticate both.

        Returns the (imap_status, smtp_status) pair from the two logins.
        """
        self.M = imaplib.IMAP4_SSL(self.IMAP_SERVER, self.IMAP_PORT)
        self.S = smtplib.SMTP_SSL(self.SMTP_SERVER, self.SMTP_PORT)
        rc, self.response = self.M.login(username, password)
        sc, self.response_s = self.S.login(username, password)
        self.username = username
        return rc, sc

    def send_mail(self, to_addrs, msg, subject=None):
        """Send a plain-text message via the SMTP session opened by login()."""
        msg = MIMEText(msg)
        if subject != None:
            msg['Subject'] = subject
        msg['From'] = self.username
        msg['To'] = to_addrs
        return self.S.sendmail(self.username, to_addrs, msg.as_string())

    def get_mailboxes(self):
        """Populate self.mailboxes with the account's mailbox names."""
        rc, self.response = self.M.list()
        for item in self.response:
            self.mailboxes.append(item.split()[-1])
        return rc

    def get_mail_count(self, folder='Inbox'):
        """Return the number of messages in ``folder`` (also selects it)."""
        rc, self.response = self.M.select(folder)
        return self.response[0]

    def get_unread_count(self, folder='Inbox'):
        """Return the unread-message count of ``folder`` as a string."""
        rc, self.response = self.M.status(folder, "(UNSEEN)")
        # Raw string fixes the invalid '\d' escape in the original pattern.
        unreadCount = re.search(r"UNSEEN (\d+)", self.response[0]).group(1)
        return unreadCount

    def get_imap_quota(self):
        """Return (used_MB, total_MB) of the account's IMAP quota."""
        quotaStr = self.M.getquotaroot("Inbox")[1][1][0]
        # Raw string fixes the invalid '\d' escape in the original pattern.
        r = re.compile(r'\d+').findall(quotaStr)
        if r == []:
            # No numbers in the quota response: report 0/0 instead of crashing.
            r.append(0)
            r.append(0)
        # Server reports kilobytes; convert to megabytes.
        return float(r[1]) / 1024, float(r[0]) / 1024

    def get_mails_from(self, uid, folder='Inbox'):
        """Return the ids of all messages in ``folder`` sent from ``uid``."""
        status, count = self.M.select(folder, readonly=1)
        status, response = self.M.search(None, 'FROM', uid)
        email_ids = [e_id for e_id in response[0].split()]
        return email_ids

    def get_mail_from_id(self, id):
        """Fetch the subject header of the message with the given id."""
        status, response = self.M.fetch(id, '(body[header.fields (subject)])')
        return response

    def rename_mailbox(self, oldmailbox, newmailbox):
        """Rename a mailbox; returns the IMAP status code."""
        rc, self.response = self.M.rename(oldmailbox, newmailbox)
        return rc

    def create_mailbox(self, mailbox):
        """Create a mailbox; returns the IMAP status code."""
        rc, self.response = self.M.create(mailbox)
        return rc

    def delete_mailbox(self, mailbox):
        """Delete a mailbox; returns the IMAP status code."""
        rc, self.response = self.M.delete(mailbox)
        return rc

    def logout(self):
        """Close both the IMAP and SMTP sessions."""
        self.M.logout()
        self.S.quit()
if __name__ == '__main__':
    # Demo: log in and send a single message.
    # The address/password values are redacted placeholders — substitute real
    # credentials (e.g. a Gmail app password) before running.
    user = '<EMAIL>'
    pwd = '<PASSWORD>'
    gm = pygmail()
    gm.login(user, pwd)
    send_to = '<EMAIL>'
    msg = 'Hi there, have you ever thought about the suffering of animals? Go vegan!'
    gm.send_mail(send_to, msg, 'peace')
| StarcoderdataPython |
1706833 | <reponame>sling254/Newscatch
from flask import render_template, url_for
from . import main
from ..request import get_news_source, get_articles
@main.route('/')
def index():
    """
    Render the home page with news sources for each category
    (technology, sports, business, science).
    """
    technology = get_news_source('technology')
    sports = get_news_source('sports')
    business = get_news_source('business')
    science = get_news_source('science')
    title = "News Home Source"
    return render_template('index.html', title=title, technology=technology, sports=sports,business=business, science=science)
@main.route('/articles/<id>')
def display_article(id):
    '''
    Render the articles page listing the articles published by the
    news source identified by ``id``.
    '''
    article = get_articles(id)
    title = f'News Room articles | {id}'
    return render_template('articles.html',title= title,articles= article)
89996 | <filename>autovirt/equipment/interface/__init__.py
from .equipment import EquipmentGateway
| StarcoderdataPython |
111789 | <reponame>ploshkin/hivemind<filename>hivemind/client/optim/collaborative.py
from __future__ import annotations
import warnings
from dataclasses import dataclass
from threading import Thread, Lock, Event
from typing import Optional, Type
import logging
import torch
import numpy as np
from hivemind.dht import DHT
from hivemind.client.optim.base import DecentralizedOptimizerBase
from hivemind.client.averaging.training import TrainingAverager
from hivemind.utils import get_logger, get_dht_time, run_in_background, ValueWithExpiration
from hivemind.client.optim.performance_ema import PerformanceEMA
logger = get_logger(__name__)
# _LRScheduler is a private torch class; fall back to None when this torch
# version does not expose it, so the Optional[LRSchedulerBase] annotation
# below still resolves.
LRSchedulerBase = getattr(torch.optim.lr_scheduler, '_LRScheduler', None)
@dataclass(frozen=False)
class CollaborationState:
    """Snapshot of collaboration-wide training progress, aggregated from peer metadata in the DHT."""
    optimizer_step: int        # highest optimizer step reported by a non-client peer
    samples_accumulated: int   # samples gathered towards the next step at that optimizer_step
    target_batch_size: int     # collective batch size that triggers an optimizer step
    num_peers: int             # peers with valid, unexpired metadata
    num_clients: int           # subset of peers running in client mode
    eta_next_step: float       # estimated DHT time at which the next step is due
    next_fetch_time: float     # DHT time at which this state should be refreshed

    @property
    def ready_for_step(self):
        # A step is due once enough samples accumulated OR the ETA has passed.
        return self.samples_accumulated >= self.target_batch_size or get_dht_time() >= self.eta_next_step

    def register_step(self):
        # Account locally for a just-performed optimizer step; the ETA is reset
        # to infinity until the next fetch re-estimates it.
        self.optimizer_step += 1
        self.samples_accumulated = 0
        self.eta_next_step = float('inf')
class CollaborativeOptimizer(DecentralizedOptimizerBase):
    """
    An optimizer that performs model updates after collaboratively accumulating a target (large) batch size across peers

    These optimizers use DHT to track how much progress did the collaboration make towards target batch size.
    Once enough samples were accumulated, optimizers will compute a weighted average of their statistics.

    :note: This optimizer behaves unlike regular pytorch optimizers in two ways:

    - calling .step will periodially zero-out gradients w.r.t. model parameters after each step
    - it may take multiple .step calls without updating model parameters, waiting for peers to accumulate enough samples

    :param opt: a standard pytorch optimizer, preferably a large-batch one such as LAMB, LARS, etc.
    :param dht: a running hivemind.DHT daemon connected to other peers
    :param prefix: a common prefix for all metadata stored by CollaborativeOptimizer in the DHT
    :param target_batch_size: perform optimizer step after all peers collectively accumulate this many samples
    :param batch_size_per_step: before each call to .step, user should accumulate gradients over this many samples
    :param target_group_size: maximum group size for DecentralizedAverager's all-reduce
    :param min_refresh_period: wait for at least this many seconds before fetching new collaboration state
    :param max_refresh_period: wait for at most this many seconds before fetching new collaboration state
    :param default_refresh_period: if no peers are detected, attempt to fetch collaboration state this often (seconds)
    :param expected_drift_peers: assume that this many new peers can join between steps
    :param expected_drift_rate: assumes that this fraction of current collaboration can join/leave between steps
    :note: the expected collaboration drift parameters are used to adjust the frequency with which this optimizer will
    refresh the collaboration-wide statistics (to avoid missing the moment when to run the next step)
    :param bandwidth: peer's network bandwidth for the purpose of load balancing (recommended: internet speed in mbps)
    :param performance_ema_alpha: smoothing value used to estimate this peer's performance (training samples per second)
    :param averaging_expiration: peer's requests for averaging will be valid for this many seconds
    :param metadata_expiration: peer's metadata (e.g. samples processed) is stored onto DHT for this many seconds
    :param averaging_timeout: if an averaging step hangs for this long, it will be cancelled.
    :param scheduler: if specified, use this scheduler to update optimizer learning rate
    :note: if you are using CollaborativeOptimizer with a lr_scheduler, it is recommended to pass this scheduler
    explicitly into this class. Otherwise, scheduler may not be synchronized between peers.
    """

    def __init__(self, opt: torch.optim.Optimizer, *, dht: DHT, prefix: str, target_batch_size: int,
                 batch_size_per_step: Optional[int] = None, scheduler: Optional[LRSchedulerBase] = None,
                 min_refresh_period: float = 0.5, max_refresh_period: float = 30, default_refresh_period: float = 3,
                 expected_drift_peers: float = 3, expected_drift_rate: float = 0.2, performance_ema_alpha: float = 0.1,
                 metadata_expiration: float = 30.0, averaging_timeout: Optional[float] = None, verbose: bool = False,
                 **kwargs):
        super().__init__(opt, dht)
        self.prefix, self.scheduler = prefix, scheduler
        self.target_batch_size, self.batch_size_per_step = target_batch_size, batch_size_per_step
        self.min_refresh_period, self.max_refresh_period, self.default_refresh_period =\
            min_refresh_period, max_refresh_period, default_refresh_period
        self.expected_drift_peers, self.expected_drift_rate = expected_drift_peers, expected_drift_rate
        self.averaging_timeout, self.metadata_expiration = averaging_timeout, metadata_expiration
        self.status_loglevel = logging.INFO if verbose else logging.DEBUG
        self.averager = self._make_averager(**kwargs)

        self.training_progress_key = f"{self.prefix}_progress"
        self.local_samples_accumulated = 0  # a number of local samples accumulated since last optimizer update
        self.local_steps_accumulated = 0  # a number of calls to step() since last optimizer update
        self.performance_ema = PerformanceEMA(alpha=performance_ema_alpha)
        self.last_step_time = None

        self.collaboration_state = self.fetch_collaboration_state()
        self.lock_collaboration_state, self.collaboration_state_updated = Lock(), Event()
        self.lock_local_progress, self.should_report_progress = Lock(), Event()
        # Two background daemons: one publishes this peer's progress to the DHT,
        # the other periodically refreshes the collaboration-wide state.
        self.progress_reporter = Thread(target=self.report_training_progress, daemon=True, name=f"{self}.reporter")
        self.progress_reporter.start()
        self.collaboration_state_updater = Thread(target=self.check_collaboration_state_periodically, daemon=True,
                                                  name=f"{self}.collaboration_state_updater")
        self.collaboration_state_updater.start()

    def _make_averager(self, **kwargs):
        """Build the parameter/gradient averager; separate method so subclasses can override it."""
        return TrainingAverager(self.opt, dht=self.dht, average_parameters=True, average_gradients=True,
                                prefix=f"{self.prefix}_averaging", allreduce_timeout=self.averaging_timeout, **kwargs)

    @property
    def local_step(self) -> int:
        # The averager holds the authoritative count of completed optimizer steps.
        return self.averager.local_step

    @property
    def is_synchronized(self) -> bool:
        # True iff this peer has caught up with the collaboration-wide step count.
        return self.local_step >= self.collaboration_state.optimizer_step

    def is_alive(self) -> bool:
        """The optimizer is considered alive as long as its averager is."""
        return self.averager.is_alive()

    def load_state_from_peers(self, **kwargs):
        """ Attempt to fetch the newest collaboration state from other peers """
        with self.lock_collaboration_state:
            self.averager.load_state_from_peers(**kwargs)
            # Discard any locally accumulated progress: it belongs to a stale step.
            self.local_samples_accumulated = self.local_steps_accumulated = 0
            self.update_scheduler()
            self.opt.zero_grad()

    def step(self, batch_size: Optional[int] = None, **kwargs):
        """
        Report accumulating gradients w.r.t. batch_size additional samples, optionally update model parameters

        :param batch_size: optional override for batch_size_per_step from init

        :note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details.
        """
        if batch_size is not None and self.batch_size_per_step is None:
            raise ValueError("Please either set batch_size_per_step parameter at init or provide batch_size in .step")
        batch_size = self.batch_size_per_step if batch_size is None else batch_size

        if not self.is_synchronized:
            self.load_state_from_peers()
            return

        if self.last_step_time is not None and get_dht_time() - self.last_step_time > self.metadata_expiration:
            logger.warning(f"Training step took {get_dht_time() - self.last_step_time}, "
                           f"but metadata expired in {self.metadata_expiration} s.")

        # Record local progress; the reporter thread will publish it to the DHT.
        with self.lock_local_progress:
            self.local_samples_accumulated += batch_size
            self.local_steps_accumulated += 1
            self.performance_ema.update(num_processed=self.batch_size_per_step)
            self.should_report_progress.set()

        if not self.collaboration_state.ready_for_step:
            return

        logger.log(self.status_loglevel, "Averaging parameters and gradients with peers...")
        self.collaboration_state = self.fetch_collaboration_state()
        self.collaboration_state_updated.set()

        if not self.is_synchronized:
            self.load_state_from_peers()
            return

        with self.performance_ema.pause(), self.lock_collaboration_state:
            if self.collaboration_state.num_peers > 1:
                # Weight each peer's contribution by its share of the target batch.
                mean_samples_per_worker = self.target_batch_size / self.collaboration_state.num_peers
                weight = self.local_samples_accumulated / mean_samples_per_worker
                output = self.averager.step(weight=weight, timeout=self.averaging_timeout, **kwargs)
            else:
                logger.log(self.status_loglevel, f"Skipped averaging: collaboration consists of "
                                                 f"{self.collaboration_state.num_peers} peer(s).")
                output = None
                self.averager.local_step += 1

            self.opt.step()
            self.opt.zero_grad()
            self.local_samples_accumulated = self.local_steps_accumulated = 0
            self.collaboration_state.register_step()
            self.collaboration_state_updated.set()
            self.update_scheduler()

        logger.log(self.status_loglevel, f"Optimizer step: done!")
        return output

    def report_training_progress(self):
        """ Periodically publish metadata and the current number of samples accumulated towards the next step """
        while self.is_alive():
            self.should_report_progress.wait()
            self.should_report_progress.clear()
            with self.lock_local_progress:
                current_time = get_dht_time()
                local_state_info = [self.local_step, self.local_samples_accumulated,
                                    self.performance_ema.samples_per_second, current_time, not self.averager.listen]

            assert self.is_valid_peer_state(local_state_info), local_state_info
            self.dht.store(self.training_progress_key, subkey=self.averager.endpoint, value=local_state_info,
                           expiration_time=current_time + self.metadata_expiration, return_future=True)

    def check_collaboration_state_periodically(self):
        """
        Periodically check the training progress from all peers. Trigger update after target_batch_size total samples
        """
        while self.is_alive():
            time_to_next_update = max(0.0, self.collaboration_state.next_fetch_time - get_dht_time())
            if self.collaboration_state_updated.wait(time_to_next_update):
                self.collaboration_state_updated.clear()
                continue  # if state was updated externally, reset timer

            with self.lock_collaboration_state:
                self.collaboration_state = self.fetch_collaboration_state()

    def fetch_collaboration_state(self) -> CollaborationState:
        """ Read performance statistics reported by peers, estimate progress towards next batch """
        response, _expiration = self.dht.get(self.training_progress_key, latest=True) or (None, -float('inf'))
        current_time = get_dht_time()

        if not isinstance(response, dict) or len(response) == 0:
            # No peers found: estimate the next step from local throughput alone.
            logger.log(self.status_loglevel, f"Found no active peers: {response}")
            local_eta_next_step = max(0, self.target_batch_size - self.local_steps_accumulated
                                      ) / self.performance_ema.samples_per_second
            return CollaborationState(self.local_step, self.local_samples_accumulated, self.target_batch_size,
                                      num_peers=0, num_clients=0, eta_next_step=current_time + local_eta_next_step,
                                      next_fetch_time=current_time + self.default_refresh_period)

        valid_peer_states = [peer_state.value for peer_state in response.values()
                             if isinstance(peer_state, ValueWithExpiration)
                             and self.is_valid_peer_state(peer_state.value)]

        num_peers = len(valid_peer_states)
        num_clients = sum(is_client for *_, is_client in valid_peer_states)
        # The collaboration-wide step is the maximum step among non-client peers.
        global_optimizer_step = self.local_step
        for opt_step, samples_accumulated, samples_per_second, timestep, is_client in valid_peer_states:
            if not is_client:
                global_optimizer_step = max(global_optimizer_step, opt_step)

        total_samples_accumulated = estimated_curent_samples = total_samples_per_second = 0

        for opt_step, samples_accumulated, samples_per_second, timestep, is_client in valid_peer_states:
            total_samples_per_second += samples_per_second
            if opt_step == global_optimizer_step:
                total_samples_accumulated += samples_accumulated
                estimated_curent_samples += samples_accumulated + max(0, current_time - timestep) * samples_per_second
            # note: we deliberately count only valid peers for samples_accumulated, but all peers for performance;
            # the rationale behind this is that outdated peers will synchronize and begin contributing shortly.

        estimated_samples_remaining = self.target_batch_size - estimated_curent_samples
        estimated_time_to_next_step = max(0, estimated_samples_remaining) / total_samples_per_second

        # Refresh sooner when many new peers may join (see expected_drift_* in __init__).
        expected_max_peers = max(num_peers + self.expected_drift_peers, num_peers * (1 + self.expected_drift_rate))
        time_to_next_fetch = float(np.clip(a=estimated_time_to_next_step * num_peers / expected_max_peers,
                                           a_min=self.min_refresh_period, a_max=self.max_refresh_period))
        logger.log(self.status_loglevel, f"Collaboration accumulated {total_samples_accumulated} samples from "
                                         f"{num_peers} peers; ETA {estimated_time_to_next_step:.2f} seconds "
                                         f"(refresh in {time_to_next_fetch:.2f}s.)")
        return CollaborationState(
            global_optimizer_step, total_samples_accumulated, target_batch_size=self.target_batch_size,
            num_peers=num_peers, num_clients=num_clients, eta_next_step=current_time + estimated_time_to_next_step,
            next_fetch_time=current_time + time_to_next_fetch)

    def zero_grad(self, *args, **kwargs):
        """Intentionally a no-op: .step() zeroes gradients itself after each update."""
        warnings.warn("CollaborativeOptimizer.zero_grad is a no-op and doesn't need to be called")

    @staticmethod
    def is_valid_peer_state(state):
        # A valid report is [step, samples_accumulated, samples_per_second, timestamp, is_client].
        return isinstance(state, (list, tuple)) and len(state) == 5 \
               and all(map(isinstance, state, (int, int, float, float, bool)))

    def update_scheduler(self):
        """Advance the lr scheduler (if any) until it matches the local step count."""
        if self.scheduler:
            while self.scheduler._step_count < self.local_step:
                self.scheduler.step()

    def shutdown(self):
        """Stop the averager and remove this peer's progress entry from the DHT."""
        logger.debug("Shutting down averager...")
        self.averager.shutdown()
        logger.debug("Sending goodbye to peers...")
        self.dht.store(self.training_progress_key, subkey=self.averager.endpoint, value=None,
                       expiration_time=get_dht_time() + self.metadata_expiration)
        logger.debug(f"{self.__class__.__name__} is shut down.")

    def __del__(self):
        self.shutdown()
| StarcoderdataPython |
3273070 | from sympy.abc import x
import sympy as sp
import math
def newton_iteration(x0, func, tol=1e-9, Max_iter=100):
    """Solve a non-linear equation func(x) = 0 by Newton's method.

    Args:
        x0: double, the initial value of the iteration
        func: callable returning a sympy expression in the symbol ``x``
        tol: double, the iteration accuracy (|v - u| stopping criterion)
        Max_iter: int, maximum iteration number

    Returns:
        (k, v): the number of iterations performed and the final iterate.
    """
    # Symbolic derivative of the target function.
    derivative = sp.diff(func(x))

    # Perform the first Newton update unconditionally.
    iterations = 1
    current = x0
    following = current - func(current) / derivative.subs(x, current)

    # Keep iterating while not converged, the derivative is non-zero at the
    # new iterate, and the iteration budget is not exhausted.
    while (math.fabs(following - current) >= tol
           and derivative.subs(x, following) != 0
           and iterations < Max_iter):
        iterations += 1
        current = following
        following = current - func(current) / derivative.subs(x, current)

    return iterations, following
def f(x):
    """Test polynomial x^3 + x^2 - 3x - 3 = (x + 1)(x^2 - 3); roots are -1 and ±sqrt(3)."""
    return sp.Pow(x, 3) + sp.Pow(x, 2) - 3*x - 3
if __name__ == '__main__':
    """ The initial values
    When x0 = -2, it converges to another root -sqrt{3}
    When x0 = -1, it is the root of the function
    When x0 = 0, it converges to -1
    When x0 = 2, it converges to sqrt{3} quickly
    When x0 = 3000000, it converges to sqrt{3} slowly
    """
    # NOTE(review): the note above mentions x0 = 3000000 but the list uses
    # ~1e16 — confirm which starting value is intended.
    x0_lst = [-2.0, -1.0, 0.0, 2.0, 9999999999999999.0]
    for x0 in x0_lst:
        n, root = newton_iteration(x0, f, 1e-6)
        print(f"The root of the non-linear equation with initial value {x0} is {root:.7f}.")
        print(f"Iteration number is {n}.")
| StarcoderdataPython |
4808551 | <reponame>rGunti/flpy_bank
from dataclasses import asdict
from typing import List
import yaml
from flpy_bank.exporter import DataExporter
from flpy_bank.objects import Record
class YamlExporter(DataExporter):
    """DataExporter that serializes records to a YAML file."""

    def __init__(self, file: str):
        self.file = file  # destination path of the YAML document

    def export_data(self, data: List[Record]):
        """Write *data* to ``self.file`` as a YAML list of mappings."""
        serializable = [asdict(record) for record in data]
        with open(self.file, 'w') as handle:
            handle.write(yaml.safe_dump(serializable))
| StarcoderdataPython |
157059 | from torchtext import data
from torchtext import datasets
# Testing SNLI
# Smoke test: build fields/vocab, then iterate one batch two ways
# (explicit splits + the convenience .iters constructor).
print("Run test on SNLI...")
TEXT = datasets.nli.ParsedTextField()
LABEL = data.LabelField()
TREE = datasets.nli.ShiftReduceField()

train, val, test = datasets.SNLI.splits(TEXT, LABEL, TREE)

print("Fields:", train.fields)
print("Number of examples:\n", len(train))
print("First Example instance:\n", vars(train[0]))

TEXT.build_vocab(train)
LABEL.build_vocab(train)

train_iter, val_iter, test_iter = data.Iterator.splits((train, val, test), batch_size=3)

batch = next(iter(train_iter))
print("Numericalize premises:\n", batch.premise)
print("Numericalize hypotheses:\n", batch.hypothesis)
print("Entailment labels:\n", batch.label)

print("Test iters function")
train_iter, val_iter, test_iter = datasets.SNLI.iters(batch_size=4, trees=True)

batch = next(iter(train_iter))
print("Numericalize premises:\n", batch.premise)
print("Numericalize hypotheses:\n", batch.hypothesis)
print("Entailment labels:\n", batch.label)

# Testing MultiNLI
# Same checks as above, plus the extra genre field MultiNLI provides.
print("Run test on MultiNLI...")
TEXT = datasets.nli.ParsedTextField()
LABEL = data.LabelField()
GENRE = data.LabelField()
TREE = datasets.nli.ShiftReduceField()

train, val, test = datasets.MultiNLI.splits(TEXT, LABEL, TREE, GENRE)

print("Fields:", train.fields)
print("Number of examples:\n", len(train))
print("First Example instance:\n", vars(train[0]))

TEXT.build_vocab(train)
LABEL.build_vocab(train)
# Genre vocab is built over all splits so validation/test genres are covered.
GENRE.build_vocab(train, val, test)

train_iter, val_iter, test_iter = data.Iterator.splits((train, val, test), batch_size=3)

batch = next(iter(train_iter))
print("Numericalize premises:\n", batch.premise)
print("Numericalize hypotheses:\n", batch.hypothesis)
print("Entailment labels:\n", batch.label)
print("Genre categories:\n", batch.genre)

print("Test iters function")
train_iter, val_iter, test_iter = datasets.MultiNLI.iters(batch_size=4, trees=True)

batch = next(iter(train_iter))
print("Numericalize premises:\n", batch.premise)
print("Numericalize hypotheses:\n", batch.hypothesis)
print("Entailment labels:\n", batch.label)
| StarcoderdataPython |
class Vehicle:
    """Base record describing a registered vehicle."""

    def __init__(self, regnum, make, model, color):
        # Store the identifying details verbatim.
        self.regnum = regnum
        self.make = make
        self.model = model
        self.color = color
class PassengerVehicle(Vehicle):
    """A Vehicle that additionally records its passenger capacity."""

    def __init__(self, regnum, make, model, color, pasCap):
        super().__init__(regnum, make, model, color)
        self.pascap = pasCap

    def set(self):
        """Interactively update one attribute chosen by the user."""
        setattr(self, input("What you want to change "), input("Enter the value "))

    def get(self):
        """Print the vehicle details on a single line."""
        print(self.regnum, ' ', self.make, ' ', self.model, ' ', self.color, ' ', self.pascap)
class CommercialVehicle(Vehicle):
    """A Vehicle that additionally records its load capacity."""

    def __init__(self, regnum, make, model, color, loadCap):
        super().__init__(regnum, make, model, color)
        self.loadcap = loadCap

    def set(self):
        """Interactively update one attribute chosen by the user."""
        setattr(self, input("What you want to change "), input("Enter the value "))

    def get(self):
        """Print the vehicle details on a single line."""
        print(self.regnum, ' ', self.make, ' ', self.model, ' ', self.color, ' ', self.loadcap)
# ob1=PassengerVehicle("007",'BMW','7 series',"White",'5')
# ob1.get()
# ob1=CommercialVehicle("420",'Mercedes','Actros 2640 LS',"Black",'4 T')
# ob1.get() | StarcoderdataPython |
1687434 | import sys
import dask.dataframe as dd
from handling_data.handling_data import HandlingData
from automl.mltrons_automl import MltronsAutoml
# Load the dataset lazily with dask and configure the task.
ddf = dd.read_csv("titanic.csv")
target_variable = 'Survived'
problem_type = 'Classification'

h = HandlingData(ddf, target_variable, problem_type)
train_pool, test_pool, order_of_features = h.init_data_handling()
print(order_of_features)
# NOTE(review): everything below this sys.exit() is unreachable — confirm
# whether the early exit is a leftover debugging aid.
sys.exit()

auto_ml = MltronsAutoml(problem_type, target_variable, order_of_features)
auto_ml.fit(train_pool, test_pool)

### list of models
#auto_ml.models

### getting prediction
auto_ml.models[0].predict(test_pool)

###
| StarcoderdataPython |
1692401 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 3 13:45:43 2020
@author: antony
"""
import json
import time
import urllib.request
import collections
import subprocess
import re
pubs = json.load(open('publications.json', 'r'))

# Docsum URL template; appears unused below — presumably superseded by Entrez.
URL = 'https://www.ncbi.nlm.nih.gov/pubmed/?term={}&report=docsum&format=text'

Entrez.email = '<EMAIL>'
Entrez.api_key = '<KEY>'

# title -> {'id': pubmed id (may be ''), 'data': raw medline text}.
# NOTE(review): defaultdict(str) means a missing title reads as '' — the
# short-circuit in the condition below relies on `title not in pubmeds`
# being checked first, otherwise ''['id'] would raise.
pubmeds = collections.defaultdict(str)

c = 0  # progress counter printed once per publication

for pub in pubs:
    title = pub['title']

    if title == '':
        continue

    if title not in pubmeds or pubmeds[title]['id'] == '':
        print(title)

        # Search PubMed by exact title field.
        handle = Entrez.esearch(db='pubmed', term=title, field='title')
        record = Entrez.read(handle)
        handle.close()

        if len(record['IdList']) > 0:
            # Fetch the MEDLINE record for the first hit and extract its PMID.
            handle = Entrez.efetch(db='pubmed', id=record['IdList'][0], retmode='medline', rettype='text')
            text = handle.read()
            handle.close()

            id = ''  # NOTE: shadows the builtin `id`
            matcher = re.search(r'PMID: (\d+)', text)
            if matcher:
                id = matcher.group(1)
            print(id)

            pubmeds[title] = {'id':id, 'data':text}

        time.sleep(1)  # stay under NCBI's request rate limits
        #break

    print(c)
    c += 1

# Write a tab-separated title -> PMID index.
f = open('pubmed_ids.txt', 'w')

f.write('title\tpubmed\n')

for title in sorted(pubmeds):
    f.write('{}\t{}\n'.format(title, pubmeds[title]['id']))

f.close()
1782592 | <reponame>dolong2110/Algorithm-By-Problems-Python
from typing import List
def sortedSquares(nums: List[int]) -> List[int]:
    """Given *nums* sorted ascending, return the squares sorted ascending.

    Two-pointer scan from both ends: the larger absolute value contributes
    the next-largest square, so the output array is filled from the back.
    Runs in O(n) time and O(n) extra space.
    """
    out = [0] * len(nums)
    lo, hi = 0, len(nums) - 1
    for slot in range(len(nums) - 1, -1, -1):
        if abs(nums[lo]) > abs(nums[hi]):
            out[slot] = nums[lo] * nums[lo]
            lo += 1
        else:
            out[slot] = nums[hi] * nums[hi]
            hi -= 1
    return out
3341532 | <filename>document.py
import calendar
import click
import locale
import yaml
from datetime import date
from mailmerge import MailMerge
from os import path
# Spanish month names for every strftime call below.
locale.setlocale(locale.LC_TIME, 'es_ES.UTF-8')

CONFIG = "config.yml"
TEMPLATE_1 = "cuenta-de-cobro-v1.docx"  # template filled with auto-computed dates
TEMPLATE_2 = "cuenta-de-cobro-v2.docx"  # template expecting explicit custom_date fields
def load_config():
    """Read the YAML configuration file and return it as a dict."""
    with open(CONFIG, "r") as ymlfile:
        return yaml.load(ymlfile, Loader=yaml.FullLoader)
def get_date(year=None, month=None):
today = date.today()
if year and month:
today = date(year, month, calendar.monthrange(year, month)[1])
days = calendar.monthrange(
int(today.strftime("%Y")),
int(today.strftime("%m"))
)[1]
formated_date = today.strftime(r"%d de %B de %Y")
formated_date_start = today.replace(day=1).strftime("%d de %B de %Y")
formated_date_end = today.replace(day=days).strftime("%d de %B de %Y")
return (formated_date, formated_date_start, formated_date_end)
def create_document(cfg, output="output.docx", cfg_date=False):
    """Render a single "cuenta de cobro" document from *cfg*.

    :param cfg: parsed configuration dict (see config.yml)
    :param output: path of the .docx file to write
    :param cfg_date: if True, fill explicit dates from cfg["custom_date"]
        (template v2); otherwise compute the current month's dates (template v1).
    """
    template_docx = TEMPLATE_2 if cfg_date else TEMPLATE_1

    section_user = cfg["user"]
    section_bank = cfg["bank"]
    section_target = cfg["target"]
    section_service = cfg["service"]

    # Merge fields common to both templates; previously duplicated in each branch.
    fields = dict(
        amount=section_service["amount"],
        bankName=section_bank["bankName"],
        bankNum=str(section_bank["bankNum"]),
        business=section_target["business"],
        name=section_user["name"],
        nameUpper=section_user["name"].upper(),
        nit=section_target["nit"],
        nid=str(section_user["nid"]),
        nidCity=section_user["nidCity"],
    )

    if cfg_date:
        # Read custom_date only when actually needed, so configs without the
        # section still work for the default (auto-date) path.
        section_custom_date = cfg["custom_date"]
        fields.update(
            day=str(section_custom_date["day"]),
            dayStart=str(section_custom_date["dayStart"]),
            dayEnd=str(section_custom_date["dayEnd"]),
            month=section_custom_date["month"],
            monthStart=section_custom_date["monthStart"],
            monthEnd=section_custom_date["monthEnd"],
            year=str(section_custom_date["year"]),
            yearStart=str(section_custom_date["yearStart"]),
            yearEnd=str(section_custom_date["yearEnd"]),
        )
    else:
        formated_date, formated_date_start, formated_date_end = get_date()
        fields.update(
            currentDate=formated_date,
            dateStart=formated_date_start,
            dateEnd=formated_date_end,
        )

    document = MailMerge(template_docx)
    # print(document.get_merge_fields())
    document.merge(**fields)
    document.write(output)
    document.close()
def batch_of_documents(cfg, year, output_dir=""):
    """Write one v1 document per month of *year* into *output_dir*."""
    template_docx = TEMPLATE_1

    section_user = cfg["user"]
    section_bank = cfg["bank"]
    section_target = cfg["target"]
    section_service = cfg["service"]

    # One document per calendar month, named after its reference date.
    for month in range(1, 13):
        formated_date, formated_date_start, formated_date_end = get_date(year=year, month=month)
        output_file = path.join(output_dir, f"{formated_date}.docx")

        with MailMerge(template_docx) as document:
            document.merge(
                amount=section_service["amount"],
                bankName=section_bank["bankName"],
                bankNum=str(section_bank["bankNum"]),
                business=section_target["business"],
                name=section_user["name"],
                nameUpper=section_user["name"].upper(),
                nit=section_target["nit"],
                nid=str(section_user["nid"]),
                nidCity=section_user["nidCity"],
                currentDate=formated_date,
                dateStart=formated_date_start,
                dateEnd=formated_date_end,
            )
            document.write(output_file)
@click.group()
def cli():
    # Click group: parent of the `single` and `batch` subcommands.
    # (A comment, not a docstring, so the CLI help text stays unchanged.)
    pass
@cli.command()
@click.option("--output", default="output.docx", help="Output filename")
@click.option("--custom-date", is_flag=True, help="Use date from config.yml")
def single(output, custom_date):
    # Generate a single document; --custom-date switches to the v2 template
    # fed from the custom_date section of config.yml.
    conf = load_config()
    create_document(cfg=conf, output=output, cfg_date=custom_date)
@cli.command()
@click.option("--year", required=True, type=int, help="")
@click.option("--output-dir", default="", help="")
def batch(year, output_dir):
    # Generate one document per month of the given year.
    # NOTE(review): both --help strings are empty — consider filling them in.
    conf = load_config()
    batch_of_documents(cfg=conf, year=year, output_dir=output_dir)
if __name__ == "__main__":
cli()
| StarcoderdataPython |
def is_letter(s: str) -> bool:
    """Return True if *s* is a single alphabetic character, else False."""
    # str.isalpha() already returns False for the empty string, so the
    # length check alone distinguishes single characters.
    return len(s) == 1 and s.isalpha()
3334517 | import logging
from xv_wb import xv_wb
from xv_kws import xv_kws
import json, asyncio
# Default logging level for the whole process.
logging.getLogger().setLevel(logging.INFO)
class xv:
    """Wires the keyword-spotting service (xv_kws) to the websocket server (xv_wb)."""

    def __init__(self):
        self.is_kws = False  # becomes True once the wake word has been detected
        self.kws = xv_kws()
        self.websocket_server = xv_wb(main=self.main)

    async def main(self, websocket, path):
        """Websocket entry point: handle messages from one client connection."""
        logging.info(__name__)
        while True:
            # Receive parameters as a JSON payload.
            request = await websocket.recv()
            logging.debug(request)
            params = json.loads(request)
            if self.is_kws is False:
                response = await self.kws.main(params)
                if response is not False:
                    # Wake word recognized: acknowledge and switch modes.
                    await websocket.send(f"ok")
                    self.is_kws = True
            else:
                # TODO: handle post-wake-word messages
                pass
        # NOTE(review): unreachable — the `while True` loop above never breaks.
        return True
if __name__ == "__main__":
xiaov = xv()
| StarcoderdataPython |
3394113 | """
integration tests on doppel-describe
commandline entrypoint (targeting a
python package)
"""
import json
import os
import pytest
# details that will always be true of doppel-describe output
# Set literal instead of set([...]) — same value, no throwaway list.
EXPECTED_TOP_LEVEL_KEYS = {
    "name",
    "language",
    "functions",
    "classes",
}
NUM_TOP_LEVEL_KEYS = len(EXPECTED_TOP_LEVEL_KEYS)
@pytest.fixture()
def rundescribe():
    """
    Run doppel-describe over each test package and return a dict mapping
    package name -> parsed JSON output file.
    """
    # there isn't a clean way to pass in
    # command-line args to test scripts, so
    # using environment variables
    test_packages = [
        'testpkguno',
        'testpkgdos',
        'testpkgtres',
        'pythonspecific'
    ]

    # Added this abomination because something about
    # os.getenv('TEST_PACKAGE_DIR') was resulting in a None
    test_data_dir = os.path.abspath('../../test_data')

    results = {}
    for package_name in test_packages:
        # Invoke the CLI exactly as a user would.
        cmd = "doppel-describe --language python -p {} --data-dir {}".format(
            package_name,
            test_data_dir
        )
        exit_code = os.system(cmd)
        if exit_code != 0:
            msg = "doppel-describe exited with non-zero exit code: {}"
            raise RuntimeError(msg.format(exit_code))

        # doppel-describe writes python_<package>.json into the data dir.
        output_file = "python_{}.json".format(package_name)
        path_to_output_file = os.path.join(
            test_data_dir,
            output_file
        )
        with open(path_to_output_file, 'r') as f:
            result_json = json.loads(f.read())

        results[package_name] = result_json

    return results
class TestBasicContract:
    """
    Tests that check that basic truths about the
    JSON file produced by doppel-describe remain
    true.
    """

    def test_contract(self, rundescribe):
        """
        The JSON file produced by doppel-describe should have
        only the expected top-level dictionary keys
        """
        result_json = rundescribe['testpkguno']
        for top_level_key in EXPECTED_TOP_LEVEL_KEYS:
            assert result_json.get(top_level_key, False)
        # No extra keys beyond the expected set.
        assert len(result_json.keys()) == NUM_TOP_LEVEL_KEYS

    def test_name(self, rundescribe):
        """
        'name' should be a string
        """
        assert isinstance(rundescribe['testpkguno']['name'], str)

    def test_language(self, rundescribe):
        """
        'language' should be 'python'
        """
        assert rundescribe['testpkguno']['language'] == 'python'

    def test_functions_block(self, rundescribe):
        """
        'functions' should be a dictionary keyed
        by function name. Each function should have a dictionary keyed
        by 'args' where 'args' holds an array of strings.
        Nothing other than 'args' should be included in the
        function interface.
        """
        for func_name, func_interface in rundescribe['testpkguno']['functions'].items():
            args = func_interface['args']
            assert isinstance(args, list)
            assert len(func_interface.keys()) == 1
            if len(args) > 0:
                assert all([isinstance(x, str) for x in args])

    def test_classes_block(self, rundescribe):
        """
        'classes' should be a dictionary keyed
        by class name. Each of those classes should
        have a single section called 'public_methods'.
        Each method should have a dictionary keyed
        by 'args' where 'args' holds an array of strings.
        Nothing other than 'args' should be included in the
        method interface and nothing other than 'public_methods'
        should be included in the class interface.
        """
        for class_name, class_interface in rundescribe['testpkguno']['classes'].items():
            assert len(class_interface.keys()) == 1
            for method_name, method_interface in class_interface['public_methods'].items():
                args = method_interface['args']
                assert isinstance(args, list)
                assert len(method_interface.keys()) == 1
                if len(args) > 0:
                    assert all([isinstance(x, str) for x in args])
class TestFunctionStuff:
    """
    Tests that the "function" block of the JSON
    produced by doppel-describe is correct.
    """

    def test_functions_found(self, rundescribe):
        """
        Exported functions should all be found,
        even if decorators are used on them.
        No other stuff should end up in "functions".
        """
        func_dict = rundescribe['testpkguno']['functions']
        expected_functions = [
            'function_a',
            'function_b',
            'function_c'
        ]
        for f in expected_functions:
            assert func_dict.get(f, False)
        # Exactly the expected functions — nothing extra leaked in.
        assert len(func_dict.keys()) == len(expected_functions)

    def test_empty_function(self, rundescribe):
        """
        Functions without any arguments should get an
        'args' dictionary with an empty list.
        """
        assert rundescribe['testpkguno']['functions']['function_a']['args'] == []

    def test_regular_function(self, rundescribe):
        """
        Functions with a mix of actual keyword args
        and '**kwargs' should have the correct signature.
        """
        expected = {
            "args": ['x', 'y', '~~KWARGS~~']
        }
        assert rundescribe['testpkguno']['functions']['function_b'] == expected

    def test_kwargs_only_function(self, rundescribe):
        """
        Functions with only '**kwargs' should have
        the correct signature.
        """
        expected = {
            "args": ['~~KWARGS~~']
        }
        assert rundescribe['testpkguno']['functions']['function_c'] == expected
class TestClassStuff:
    """
    Tests that the "classes" block of the JSON
    produced by doppel-describe is correct.
    """

    def test_classes_found(self, rundescribe):
        """
        Exported classes should all be found.
        """
        class_dict = rundescribe['testpkguno']['classes']
        expected_classes = [
            'ClassA',
            'ClassB',
            'ClassC',
            'ClassD',
            'ClassE',
            'ClassF'
        ]
        for c in expected_classes:
            assert class_dict.get(c, False)
        assert len(class_dict.keys()) == len(expected_classes)

    def test_class_public_methods_found(self, rundescribe):
        """
        Public class methods of all exported classes
        should be found.
        No other stuff should end up underneath classes
        within "classes".
        """
        class_dict = rundescribe['testpkguno']['classes']
        expected_methods = [
            '~~CONSTRUCTOR~~',
            'anarchy',
            'banarchy',
            'canarchy'
        ]
        for e in expected_methods:
            assert class_dict['ClassA']['public_methods'].get(e, False)
        assert len(class_dict['ClassA']['public_methods'].keys()) == len(expected_methods)

    def test_inherited_class_public_methods_found(self, rundescribe):
        """
        Public methods documented in the API of exported
        classes should include methods which are defined
        by a parent object and not overwritten by the
        child.
        No other stuff should end up underneath classes
        within "classes".
        """
        class_dict = rundescribe['testpkguno']['classes']
        expected_methods = [
            '~~CONSTRUCTOR~~',
            'anarchy',
            'banarchy',
            'canarchy',
            'hello_there'
        ]
        for e in expected_methods:
            assert class_dict['ClassB']['public_methods'].get(e, False)
        assert len(class_dict['ClassB']['public_methods'].keys()) == len(expected_methods)

    def test_classmethods_found(self, rundescribe):
        """
        Class methods should be correctly found and
        documented alongside other public methods in
        a class
        """
        assert rundescribe['testpkguno']['classes']['ClassC']['public_methods'].get('from_string', False)

    def test_inherited_classmethods_found(self, rundescribe):
        """
        Class methods inherited from a parent class
        should be correctly found and documented
        alongside other public methods in a class
        """
        assert rundescribe['testpkguno']['classes']['ClassD']['public_methods'].get('from_string', False)

    def test_empty_constructors(self, rundescribe):
        """
        Classes with constructors that have no keyword args
        should be serialized correctly
        """
        class_dict = rundescribe['testpkguno']['classes']
        expected_methods = [
            '~~CONSTRUCTOR~~',
            'from_string'
        ]
        for e in expected_methods:
            assert class_dict['ClassE']['public_methods'].get(e, False)

        # test that things with no kwargs produce "args": [], not "args": {}
        # expect_true(isTRUE(
        #     grepl('.+"ClassE".+~~CONSTRUCTOR~~.+"args"\\:\\[\\]', RESULTS[["testpkguno"]][["raw"]])
        # ))
        # expect_true(isTRUE(
        #     grepl('.+"from_string".+~~CONSTRUCTOR~~.+"args"\\:\\[\\]', RESULTS[["testpkguno"]][["raw"]])
        # ))

    def test_empty_classes(self, rundescribe):
        """
        Totally empty classes should still have their
        constructors documented
        """
        assert list(rundescribe['testpkguno']['classes']['ClassF']['public_methods'].keys()) == ['~~CONSTRUCTOR~~']
        assert rundescribe['testpkguno']['classes']['ClassF']['public_methods']['~~CONSTRUCTOR~~'] == {'args': []}
class TestFunctionOnly:
    """
    Test the behavior of analyze.py for packages
    which have functions but not classes
    """

    def test_top_level_keys(self, rundescribe):
        """The describe JSON should contain exactly the expected top-level keys."""
        result_json = rundescribe['testpkgdos']
        assert all(result_json.get(key) is not None for key in EXPECTED_TOP_LEVEL_KEYS)
        assert len(result_json) == NUM_TOP_LEVEL_KEYS
class TestClassOnly:
    """
    Test the behavior of analyze.py for packages
    which have classes but not functions
    """

    def test_top_level_keys(self, rundescribe):
        """The describe JSON should contain exactly the expected top-level keys."""
        result_json = rundescribe['testpkgtres']
        assert all(result_json.get(key) is not None for key in EXPECTED_TOP_LEVEL_KEYS)
        assert len(result_json) == NUM_TOP_LEVEL_KEYS
# NOTE(review): this class name does not start with "Test", so under pytest's
# default collection rules none of the tests below will ever run.  Renaming it
# to TestPythonSpecific would fix collection, but that changes the public
# name, so it is only flagged here — confirm and rename upstream.
class PythonSpecific:
    """
    Test the behavior of analyze.py for packages
    with some Python-specific features like
    submodules and custom exceptions
    """
    def test_top_level_keys(self, rundescribe):
        """
        The JSON file produce by doppel-describe
        should have only the expected top-level dictionary keys
        """
        result_json = rundescribe['pythonspecific']
        for top_level_key in EXPECTED_TOP_LEVEL_KEYS:
            assert result_json.get(top_level_key, None) is not None
        assert len(result_json.keys()) == NUM_TOP_LEVEL_KEYS
    def test_sub_modules(self, rundescribe):
        """
        analyze.py should correctly handle python submodules and
        should ignore package constant.
        """
        result_json = rundescribe['pythonspecific']
        # Only the top-level function/classes should be reported, not
        # submodule contents or module-level constants.
        assert set(result_json['functions'].keys()) == set(['some_function'])
        assert set(result_json['classes'].keys()) == set(['SomeClass', 'GreatClass'])
| StarcoderdataPython |
4813326 | <gh_stars>1-10
"""conftest.py -- Configure pytest.
https://docs.pytest.org/en/reorganize-docs/example/simple.html#control-skipping-of-tests-according-to-command-line-option
"""
import pytest
def pytest_addoption(parser):
    """Register the --runslow flag used to opt in to slow tests."""
    parser.addoption(
        "--runslow",
        action="store_true",
        help="run slow tests",
    )
140706 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import cv2
import numpy as np
import constants as const
import transformations.shadow_mask as mask
def add_n_ellipses_light(image, intensity = 0.5, blur_width = 6, n = 1):
    """Brighten n random elliptical regions of *image*.

    Lightening is implemented as shadowing in inverted colour space.
    """
    inverted = const.WHITE - image
    shadowed = add_n_ellipses_shadow(inverted, intensity, blur_width, n)
    return const.WHITE - shadowed
def add_n_ellipses_shadow(image, intensity = 0.5, blur_width = 6, n = 1):
    """Darken n random elliptical regions of *image*, one after another."""
    for _ in range(n):
        image = add_ellipse_shadow(image, intensity=intensity,
                                   blur_width=blur_width)
    return image
def add_ellipse_light(image, intensity = 0.5, blur_width = 6):
    """Brighten one random elliptical region (shadow applied to inverted image)."""
    inverted = const.WHITE - image
    shadowed = add_ellipse_shadow(inverted, intensity, blur_width)
    return const.WHITE - shadowed
def add_ellipse_shadow(image, intensity = 0.5, blur_width = 6):
    """Darken one random elliptical region of *image* using a blurred mask."""
    # Start from an all-white mask, then darken it with concentric ellipses.
    shadow_mask = np.full(image.shape[:2], const.WHITE, dtype=np.uint8)
    ellipse = __get_multiple_ellipses(shadow_mask)
    return mask.apply_shadow_mask(image, blur_width, intensity, ellipse)
def __get_multiple_ellipses(image):
    """Draw five concentric filled ellipses (light rim to dark core) on *image*."""
    h, w = image.shape[:2]
    # Random placement, size and orientation; RNG call order kept stable.
    center = int(w * np.random.uniform()), int(h * np.random.uniform())
    rand_h = np.random.uniform() * h
    rand_w = np.random.uniform() * w
    angle = 360 * np.random.uniform()
    # Outermost (full-size, lightest) first, innermost (20%, darkest) last.
    shades = (const.DARK_WHITE, const.LIGHT_GRAY, const.GRAY,
              const.DARK_GRAY, const.LIGHT_BLACK)
    scales = (1.0, 0.8, 0.6, 0.4, 0.2)
    for scale, shade in zip(scales, shades):
        axes = int(rand_h * scale), int(rand_w * scale)
        image = get_single_ellipse(image, center, axes, angle, shade)
    return image
def get_single_ellipse(image, center, axes, angle, color):
    """Draw one filled ellipse (full 0-360 degree sweep) and return the image."""
    # thickness -1 means "filled" in OpenCV drawing functions.
    return cv2.ellipse(image, center, axes, angle, 0, 360, color, -1)
| StarcoderdataPython |
145052 | # -*- coding: utf-8 -*-
"""
Created on 12 April, 2019
@author: Tarpelite
"""
import requests,re,collections
from bs4 import BeautifulSoup
import json
# choose your demand
Max_page = 2                      # pages fetched per keyword (10 results each)
key = 'sparse+autoencoder'        # NOTE: rebound by the `for key in keywords` loop below
start = '2000'                    # lower bound of the publication-year filter
final = '2018'                    # upper bound of the publication-year filter
text_title = 'GStitle.txt'
text_keyword = 'GSkw.txt'         # NOTE(review): never used in this script
headers = {'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}
keywords = ["machine learning", "deep learning"]
# clear existing file
# (mode 'wt' already truncates on open; the explicit truncate() is redundant)
with open(text_title, 'wt', encoding='utf-8') as f:
    f.truncate()
# grab titles
records=[]
cnt = 0
for key in keywords:
    for i in range(Max_page):
        # Scholar-mirror search URL: paginated by multiples of 10.
        url = 'https://c.beijingbang.top/scholar?start='+str(i*10)+'&q='+key+'&as_ylo='+start+'&as_yhi='+final
        start_html = requests.get(url, headers=headers)
        Soup = BeautifulSoup(start_html.text, 'lxml')
        # Each search hit sits in a <div class="gs_ri"> block.
        papers = Soup.find_all('div', class_='gs_ri')
        for paper in papers:
            record = {}
            title = paper.find_all('h3', class_='gs_rt')
            author = paper.find_all('div', class_='gs_a')
            abstract = paper.find_all('div', class_='gs_rs')
            refs = paper.find_all('div', class_='gs_fl')
            # The link to the paper lives in the sibling element preceding
            # the result block; when absent, 'urls' may stay unset.
            if paper.previous_sibling:
                urls_soup = BeautifulSoup(str(paper.previous_sibling), 'lxml')
                urls = urls_soup.find_all('a')
                if len(urls) > 0:
                    record['urls'] = urls[0].attrs['href']
                else:
                    record['urls'] = ""
            if len(title) > 0:
                record['title'] = title[0].get_text()
            else:
                record['title'] = ""
            if len(author) > 0:
                record['authors'] = author[0].get_text()
            else:
                record["authors"] = ""
            if len(abstract) > 0:
                record['abstract'] = abstract[0].get_text()
            else:
                record['abstract'] = ""
            if len(refs)> 0:
                record['refs'] = refs[0].get_text()
            else:
                record['refs'] = ""
            records.append(record)
            # One JSON file per scraped record, numbered globally by cnt.
            with open("data/"+key+str(cnt)+".json", "w+", encoding='utf-8') as f:
                json.dump(record, f)
            cnt += 1
print(records)
| StarcoderdataPython |
3377690 | """
This is an AWS Lambda function that watches specific files of a public GitHub
repository since a given date, if it detects new changes, it notifies the user
via Telegram (using a Telegram bot).
"""
from __future__ import annotations # https://www.python.org/dev/peps/pep-0563/
import json
import os
import urllib.error
import urllib.parse
import urllib.request
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import List
import boto3
import jinja2
from utils import console_logger, utc2datetime, datetime2utc
# Log verbosity, overridable through the LOG_LEVEL environment variable.
LOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO')
# Base URL of the Telegram Bot API; the bot token is appended at call time.
TELEGRAM_API_URL = 'https://api.telegram.org/bot'
# Jinja2 template used to render the notification message.
TELEGRAM_MSG_TEMPLATE_FILE = 'telegram-msg.j2'
logger = console_logger(__name__, LOG_LEVEL)
@dataclass
class Commit:
    """Minimal view of a Git commit: message, committer date and web URL."""
    message: str
    timestamp: datetime
    url: str

    @staticmethod
    def from_api_dict(api_dict: dict) -> "Commit":
        """Build a Commit from a GitHub REST API commit payload."""
        try:
            return Commit(
                message=api_dict['commit']['message'],
                timestamp=utc2datetime(api_dict['commit']['committer']['date']),
                url=api_dict['html_url'],
            )
        except KeyError as e:
            logger.exception('Seems that the Github API is not using the way '
                             'to represent commits in JSON format they used '
                             'to.')
            raise e
def get_last_check_date(s3: boto3.session.Session.resource, bucket: str,
                        key: str) -> datetime:
    """Retrieve the last check date stored in a text file in an S3 bucket.

    Falls back to "now" when the object cannot be read.
    """
    try:
        raw = s3.Object(bucket, key).get()['Body'].read()
        return utc2datetime(raw.decode('UTF-8').strip())
    except Exception:
        logger.exception('Unable to retrieve the object %s to obtain the last '
                         ' check date, using "now" as the last check date.',
                         f's3://{bucket}/{key}')
        return datetime.now()
def write_check_date(check_date: datetime, s3: boto3.session.Session.resource,
                     bucket: str, key: str):
    """Save the check date (iso format) to a text file in an S3 bucket."""
    # Serialize outside the try-block so a conversion failure propagates
    # instead of being logged as an S3 error.
    body = datetime2utc(check_date)
    object_path = f's3://{bucket}/{key}'
    try:
        response = s3.Object(bucket, key).put(Body=body)
        if response.get('ResponseMetadata').get('HTTPStatusCode') == 200:
            logger.info('The check date was saved successfully in %s',
                        object_path)
        else:
            logger.error('Unable to save the check date in %s', object_path)
    except Exception:
        logger.exception('Unable to save the check date in %s', object_path)
def get_github_commits(repo_url: str, files_to_watch: List[str],
                       since: datetime) -> List[Commit]:
    """
    Retrieves the Github commits that contain the specified files since an
    specific date.

    Returns a list of Commit objects; empty when the API call fails.
    (Fix: the return annotation previously said List[dict], but the raw API
    dicts are mapped through Commit.from_api_dict before returning.)
    """
    query = {'path': files_to_watch, 'since': since.isoformat()}
    params = urllib.parse.urlencode(query, doseq=True,
                                    quote_via=urllib.parse.quote)
    url = f'{repo_url}?{params}'
    raw_commits: List[dict] = []
    try:
        with urllib.request.urlopen(url) as response:
            raw_commits = json.loads(response.read())
    except Exception:
        logger.exception('Unable to retrieve the Github repository commits.')
    return [Commit.from_api_dict(c) for c in raw_commits]
def make_telegram_msg(commits: List[Commit], watched_files: List[str],
                      project_name: str, template_file: str) -> str:
    """Creates the text message that will be sent via Telegram.

    Fix: `commits` holds Commit objects (produced by get_github_commits),
    not raw API dicts; the parameter annotation now says so.
    """
    template_loader = jinja2.FileSystemLoader(searchpath='.')
    template_env = jinja2.Environment(loader=template_loader)
    template = template_env.get_template(template_file)
    return template.render(commits=commits, watched_files=watched_files,
                           project_name=project_name)
def send_telegram_msg(msg: str, chat_id: str, token: str):
    """Deliver *msg* to a Telegram chat through the bot sendMessage API."""
    payload = urllib.parse.urlencode(
        {'chat_id': chat_id, 'text': msg, 'disable_web_page_preview': True}
    ).encode('ascii')
    endpoint = f'{TELEGRAM_API_URL}{token}/sendMessage'
    request = urllib.request.Request(url=endpoint, data=payload, method='POST')
    try:
        logger.info('Notifying the boss via Telegram...')
        with urllib.request.urlopen(request) as response:
            reply = json.loads(response.read())
            logger.info('Telegram response received: %s', reply)
            if reply.get('ok'):
                logger.info('The boss has been notified via Telegram.')
            else:
                logger.error('There was a problem notifying the boss via '
                             'Telegram O_o.')
    except urllib.error.URLError:
        logger.exception('There was a problem sending the Telegram message!')
def watch_files(s3_bucket: str, s3_obj_key: str, github_repo_api_url: str,
                files_to_watch: List[str], project_name: str,
                telegram_msg_template: str, telegram_chat_id: str,
                telegram_token: str):
    """Orchestrates all the operation of watching files of the repository.

    Flow: read the last-check date from S3, ask GitHub for commits touching
    the watched files since then, and — only when there are new commits —
    persist a new check date and send a Telegram notification.
    """
    logger.info('Retrieving the last check date from "%s"...',
                f's3://{s3_bucket}/{s3_obj_key}')
    s3 = boto3.resource('s3')
    last_check_date = get_last_check_date(s3, s3_bucket, s3_obj_key)
    logger.info('Retrieving the commits that contain the files %s since: %s',
                ','.join(files_to_watch),
                last_check_date.strftime('%d/%b/%Y, %I:%M %p'))
    commits = get_github_commits(github_repo_api_url, files_to_watch,
                                 last_check_date)
    if not commits:
        logger.info('There are no recent commmits that include the files the '
                    'boss is interested on.')
        return
    # The stored date is "5 minutes ago", presumably to leave a small overlap
    # window so commits near the boundary are not missed — TODO confirm.
    # NOTE(review): datetime.now() is naive while GitHub dates are UTC;
    # verify utc2datetime/datetime2utc keep these consistent.
    five_min_ago = datetime.now() - timedelta(minutes=5)
    write_check_date(five_min_ago, s3, s3_bucket, s3_obj_key)
    msg = make_telegram_msg(commits, files_to_watch, project_name,
                            telegram_msg_template)
    logger.info('Notifying about %s commit(s).', len(commits))
    send_telegram_msg(msg, telegram_chat_id, telegram_token)
def lambda_handler(event, _context):
    """AWS Lambda entry point: unpack the invocation event and run the watcher."""
    cfg = event
    watch_files(cfg['s3_bucket'], cfg['check_date_file'],
                cfg['github_repo_api_url'], cfg['files_to_watch'],
                cfg['project_name'], TELEGRAM_MSG_TEMPLATE_FILE,
                cfg['telegram_chat_id'], cfg['telegram_bot_token'])
| StarcoderdataPython |
23256 | from sklearn import tree
from matplotlib import pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn import model_selection
from sklearn import metrics
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from pandas import DataFrame
# Load the UCI-style heart-disease dataset; the "target" column is the label.
data = pd.read_csv("heart.csv")
# sns.set(style="ticks", color_codes=True)
# plot=sns.pairplot(data)
# plot.savefig("heart.png")
# pd.crosstab(data.sex,data.target).plot(kind="bar",figsize=(15,6),color=['#1CA53B','#AA1111' ])
# plt.title('Heart Disease Frequency for Sex')
# plt.xlabel('Sex (0 = Female, 1 = Male)')
# plt.xticks(rotation=0)
# plt.legend(["Haven't Disease", "Have Disease"])
# plt.ylabel('Frequency')
# plt.savefig("heart1.png")
# pd.crosstab(data.age,data.target).plot(kind="bar",figsize=(20,6))
# plt.title('Heart Disease Frequency for Ages')
# plt.xlabel('Age')
# plt.ylabel('Frequency')
# plt.savefig('heartDiseaseAndAges.png')
feature_names =["age","sex","cp","trestbps","chol" ,"fbs","restecg","thalach","exang","oldpeak","slope","ca","thal"]
x = data[feature_names].values
y = data["target"].values
# 80/20 split; random_state fixed so the split is reproducible.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5,shuffle=True)
# Standardize features for KNN (fit on train only to avoid leakage).
feature_scaler = StandardScaler()
X_train = feature_scaler.fit_transform(X_train)
X_test = feature_scaler.transform(X_test)
# Krange = range(1,30)
# scores = {}
# scores_list = []
# for k in Krange:
#     knn = KNeighborsClassifier(n_neighbors = k)
#     knn.fit(X_train,y_train)
#     y_pred = knn.predict(X_test)
#     scores[k] = metrics.accuracy_score(y_test,y_pred)
#     scores_list.append(metrics.accuracy_score(y_test,y_pred))
# plt.plot(Krange,scores_list)
# plt.xlabel("Value of K")
# plt.ylabel("Accuracy")
# plt.savefig("k.png")
# plt.show()
# k=7 appears to come from the (commented-out) K search above — TODO confirm.
model = KNeighborsClassifier(n_neighbors=7)
model.fit(X_train,y_train)
y_pred= model.predict(X_test)
print("Accuracy KNN:",metrics.accuracy_score(y_test, y_pred))
# Re-split with the same seed (identical partition) so Naive Bayes runs on
# the UNSCALED features; presumably intentional since GaussianNB does not
# require standardization — confirm.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5,shuffle=True)
#Create a Gaussian Classifier
gnb = GaussianNB()
gnb.fit(X_train, y_train)
y_pred = gnb.predict(X_test)
print("Accuracy NB:",metrics.accuracy_score(y_test, y_pred))
| StarcoderdataPython |
100973 | <reponame>mamoanwar97/Anynet_modified
import numpy as np
import skimage
import skimage.io
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torch.nn.functional as F
from torchvision.utils import save_image
import torch.backends.cudnn as cudnn
from dataloader import KITTI_testloader as DA
from dataloader import test_dataset as ls
from preprocessing.kitti_util import Calibration
from preprocessing.generate_lidar import project_disp_to_points, Calibration
from preprocessing.kitti_sparsify import pto_ang_map
import argparse
from models.anynet import AnyNet
import time
import os
import sys
from tqdm.auto import tqdm
from multiprocessing import Process, Queue, Pool
# Command-line interface for the AnyNet evaluation script.
parser = argparse.ArgumentParser(description='Evaluating Anynet')
parser.add_argument('--datapath', default=None, help='datapath')
parser.add_argument('--pretrained', type=str, default=None, help='pretrained model path')
parser.add_argument('--split_file', type=str, default=None)
parser.add_argument('--save_path', type=str, default='results/pseudoLidar_test1/', help='the path of saving checkpoints and log')
""" OPTIONS """
parser.add_argument('--with_spn', action='store_true', help='with spn network or not')
parser.add_argument('--threads', type=int, default=5)
parser.add_argument('--limit', type=int, default=-1)  # -1 means no limit on dataset size
""" Anynet modal args """
parser.add_argument('--init_channels', type=int, default=1, help='initial channels for 2d feature extractor')
parser.add_argument('--maxdisplist', type=int, nargs='+', default=[12, 3, 3])
parser.add_argument('--nblocks', type=int, default=2, help='number of layers in each stage')
parser.add_argument('--channels_3d', type=int, default=4, help='number of initial channels 3d feature extractor ')
parser.add_argument('--layers_3d', type=int, default=4, help='number of initial layers in 3d network')
parser.add_argument('--growth_rate', type=int, nargs='+', default=[4,1,1], help='growth rate in the 3d network')
parser.add_argument('--spn_init_channels', type=int, default=8, help='initial channels for spnet')
""" LiDAR args """
parser.add_argument('--max_high', type=int, default=1)
""" Kitti sparsify args """
parser.add_argument('--H', default=64, type=int)
parser.add_argument('--W', default=512, type=int)
parser.add_argument('--D', default=700, type=int)
parser.add_argument('--slice', default=1, type=int)
# Parsed once at import time; the `args` global is used throughout the script.
args = parser.parse_args()
def main():
    """Load the test split and a pretrained AnyNet model, then run evaluation."""
    global args
    test_left_img, test_right_img = ls.dataloader(args.datapath, limit=args.limit, split_file=args.split_file)
    # batch_size=1: images are processed one stereo pair at a time.
    TestImgLoader = torch.utils.data.DataLoader(
        DA.myImageFloder(test_left_img, test_right_img),
        batch_size=1, shuffle=False, num_workers=4, drop_last=False
    )
    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)
    model = AnyNet(args)
    model = nn.DataParallel(model).cuda()
    if args.pretrained:
        if os.path.isfile(args.pretrained):
            # strict=False: tolerate checkpoints with extra/missing keys.
            checkpoint = torch.load(args.pretrained)
            model.load_state_dict(checkpoint['state_dict'], strict=False)
            print("=> loaded pretrained model '{}'".format(args.pretrained))
        else:
            print("=> no pretrained model found at '{}', Check the path then try again".format(args.pretrained))
            sys.exit(0)
    cudnn.benchmark = True
    evaluate(TestImgLoader, model)
    # NOTE(review): dead multiprocessing-pool code below was superseded by the
    # direct call inside evaluate(); consider deleting it.
    # pool = Pool(args.threads)
    # pbar = tqdm(total=(len(all_outputs)), desc="Generating Results", unit='Example')
    # def update(*a):
    #     pbar.update()
    # for i, image in enumerate(all_outputs):
    #     pool.apply_async(sparse_and_save, args=(args, i, image), callback=update)
    # pool.close()
    # pool.join()
    # pbar.clear(nolock=False)
    # pbar.close()
def evaluate(dataloader, model):
    """Run stereo inference over *dataloader*, save sparse point clouds, and
    report average per-example latency.

    The first batch is treated as warm-up and excluded from timing.
    """
    total_time = 0
    model.eval()
    for i, (imgL, imgR) in tqdm(enumerate(dataloader), desc="Generating Examples", total=(len(dataloader)), unit='Example'):
        imgL = imgL.float().cuda()
        imgR = imgR.float().cuda()
        with torch.no_grad():
            start_time = time.time()
            outputs = model(imgL, imgR)
            # Stage-3 disparity map (finest refinement), squeezed to HxW.
            output3 = torch.squeeze(outputs[3], 1)
        sparse_and_save(args, i, output3.cpu())
        if i > 0:
            total_time = total_time + (time.time() - start_time)
    # Fix: guard against ZeroDivisionError when the loader yields <= 1 batch
    # (the warm-up batch is never timed, so there would be nothing to average).
    timed_batches = len(dataloader) - 1
    if timed_batches > 0:
        print("Average Time: {} ms ~ {} FPS".format(
            (total_time * 1000) / timed_batches, timed_batches / total_time))
    return
def sparse_and_save(args, i, image):
    """Convert one predicted disparity map into a sparse LiDAR point cloud
    and write it as a .bin file named after the 6-digit example index."""
    img_cpu = np.asarray(image)
    # Drop the leading batch dimension: (1, H, W) -> (H, W).
    disp_map = img_cpu[0, :, :]
    predix = str(i).zfill(6)
    # Per-example KITTI calibration file, matched by index.
    calib_file = '{}/{}.txt'.format(args.datapath +'/training/calib', predix)
    calib = Calibration(calib_file)
    """ Disparity npy Generation """
    # disp_images_path = args.save_path + 'disparity/'
    # disp_npy_path = args.save_path + 'npy/'
    # if not os.path.isdir(disp_images_path):
    #     os.makedirs(disp_images_path)
    # if not os.path.isdir(disp_npy_path):
    #     os.makedirs(disp_npy_path)
    # skimage.io.imsave(disp_images_path + predix + '.png', (disp_map*255).astype('uint8'))
    # np.save(disp_npy_path + predix, disp_map)
    """ LiDAR Generation """
    # point_cloud_path = args.save_path + 'point_cloud'
    # if not os.path.isdir(point_cloud_path):
    #     os.makedirs(point_cloud_path)
    lidar = gen_lidar(disp_map, calib)
    # lidar.tofile('{}/{}.bin'.format(point_cloud_path, predix))
    """ Sparse LiDAR Generation """
    sparse_point_cloud_path = args.save_path + 'sparse_point_cloud'
    if not os.path.isdir(sparse_point_cloud_path):
        os.makedirs(sparse_point_cloud_path)
    sparse_points = gen_sparse_points(lidar, H = args.H, W= args.W, slice=args.slice)
    sparse_points.tofile('{}/{}.bin'.format(sparse_point_cloud_path, predix))
    return
def gen_lidar(disp_map, calib, max_high=1):
    """Convert a disparity map into an Nx4 float32 point array (xyz plus a
    homogeneous 1 column)."""
    quantized = (disp_map * 255).astype(np.float32) / 255.
    points = project_disp_to_points(calib, quantized, max_high)
    homogeneous = np.concatenate([points, np.ones((points.shape[0], 1))], 1)
    return homogeneous.astype(np.float32)
def gen_sparse_points(lidar, H=64, W=512, D=700, slice=1):
    """Crop the cloud to the forward field of view and angularly downsample it."""
    pc_velo = lidar.reshape((-1, 4))
    # Keep points with x in [0, 120), y in [-50, 50), z in [-2.5, 1.5).
    in_fov = (
        (pc_velo[:, 0] >= 0) & (pc_velo[:, 0] < 120) &
        (pc_velo[:, 1] >= -50) & (pc_velo[:, 1] < 50) &
        (pc_velo[:, 2] >= -2.5) & (pc_velo[:, 2] < 1.5)
    )
    sparse = pto_ang_map(pc_velo[in_fov], H=H, W=W, slice=slice)
    return sparse.astype(np.float32)
def conv_disp_to_depth(disp_map, calib):
    """Convert a disparity map to a depth map: depth = f_u * baseline / disparity.

    Fix: the original clamped negative disparities with an in-place
    assignment, silently mutating the caller's array; the clamp now happens
    on a copy.  Zero-disparity pixels yield depth f_u * baseline — the
    ``+ 1. - mask`` term makes their denominator 1 to avoid division by zero.
    """
    baseline = 0.54  # stereo baseline in meters (value carried over as-is)
    disp = disp_map.copy()
    disp[disp < 0] = 0
    mask = disp > 0
    depth = calib.f_u * baseline / (disp + 1. - mask)
    return depth
# Script entry point (arguments are parsed at import time above).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3280581 | """File holding the Enums and Exceptions for the application. Constants, basically """
from enum import Enum
class Direction(Enum):
    """One-step rotation commands for the item; the member values are arbitrary."""
    LEFT = -90
    RIGHT = 90
class Facing(Enum):
    """Possible facings for the item; turning LEFT cycles N -> W -> S -> E."""
    __order__ = 'NORTH WEST SOUTH EAST'  # Python 2 member-order compatibility
    NORTH = 'North'
    WEST = 'West'
    SOUTH = 'South'
    EAST = 'East'

    def get_rotated(self, direction):
        """Return the facing reached after one rotation step in *direction*.

        Raises KeyError for anything that is not a Direction member, the
        same failure mode as the original state-table lookup.
        """
        cycle = (Facing.NORTH, Facing.WEST, Facing.SOUTH, Facing.EAST)
        step = {Direction.LEFT: 1, Direction.RIGHT: -1}[direction]
        return cycle[(cycle.index(self) + step) % len(cycle)]
class InvalidCommand(Exception):
    """Signals a command that cannot be executed and can safely be ignored."""
| StarcoderdataPython |
125172 | <reponame>LLNL/LBANN<filename>ci_test/integration_tests/test_integration_onnx_output.py
import functools
import operator
import os
import os.path
import re
import sys
import numpy as np
import google.protobuf.text_format
import pytest
# Local files
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
# Make the sibling common_python directory importable (provides `tools`).
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
# Path where the CallbackExportOnnx callback writes the exported model.
onnx_model = current_dir + '/experiments/test_integration_onnx_output/lbann.onnx'
import tools
import data.mnist
try:
    import onnxruntime
except ModuleNotFoundError:
    # Skip the whole module when onnxruntime is not installed.
    pytest.skip("Skipping ONNX runtime test; onnxruntime not found.",
                allow_module_level=True)
# ==============================================
# Options
# ==============================================
# Training options
# Training options
num_epochs = 5        # epochs to train before exporting the model
mini_batch_size = 64  # samples per mini-batch
num_nodes = 2         # compute nodes requested for the LBANN run
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann):
    """Construct LBANN experiment.

    Args:
        lbann (module): Module for LBANN Python frontend

    Returns:
        tuple: (trainer, model, data_reader, optimizer) for tools.create_tests.
    """
    # Skip test if ONNX option not enabled
    if not lbann.has_feature("ONNX"):
        pytest.skip("This test requires ONNX.")
    trainer = lbann.Trainer(mini_batch_size=mini_batch_size)
    model = construct_model(lbann)
    data_reader = data.mnist.make_data_reader(lbann)
    # No validation set
    data_reader.reader[0].validation_percent = 0
    optimizer = lbann.SGD(learn_rate=0.01, momentum=0.9)
    return trainer, model, data_reader, optimizer
def construct_model(lbann):
    """Construct LBANN model.

    Builds a LeNet classifier for MNIST with cross-entropy loss and a
    categorical-accuracy metric, plus a callback that exports the trained
    model to ONNX.

    Args:
        lbann (module): Module for LBANN Python frontend
    """
    # TODO (tym): Figure out how to switch between LBANN builds. See
    # GitHub Issue #1289.
    import lbann.models
    # Layer graph
    images = lbann.Input(data_field='samples', name="samples")
    labels = lbann.Input(data_field='labels', name="labels")
    x = lbann.models.LeNet(10)(images)
    probs = lbann.Softmax(x)
    loss = lbann.CrossEntropy(probs, labels)
    acc = lbann.CategoricalAccuracy(probs, labels)
    # Objects for LBANN model
    callbacks = [lbann.CallbackPrint(),
                 lbann.CallbackTimer(),
                 # Exports the model to ONNX at the end of the run.
                 lbann.CallbackExportOnnx(
                     debug_string_filename='debug_onnx.txt')]
    metrics = [lbann.Metric(acc, name='accuracy', unit='%')]
    # Construct model
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph([images, labels]),
                       objective_function=loss,
                       metrics=metrics,
                       callbacks=callbacks)
# ==============================================
# Setup PyTest
# ==============================================
def augment_test_func(test_func):
    """Augment test function to parse log files.

    `tools.create_tests` creates functions that run an LBANN experiment.
    This factory wraps each of them so that, after LBANN finishes, the
    exported ONNX model is loaded and executed once in onnxruntime as a
    smoke test.

    Note: The naive approach is to define the augmented test functions
    in a loop.  However, Python closures are late binding, so the function
    would be overwritten every time we define it.  Defining the augmented
    function inside this factory avoids that.

    Args:
        test_func (function): Test function created by
            `tools.create_tests`.

    Returns:
        function: Test that can interact with PyTest.
    """
    test_name = test_func.__name__

    # Define test function
    def func(cluster, dirname):
        # Run LBANN experiment; its return value is not needed here
        # (fix: the unused `experiment_output` local was removed).
        test_func(cluster, dirname)
        # Smoke-test the exported ONNX model: one forward pass on
        # zero-filled MNIST-shaped inputs; we only care that it runs.
        session = onnxruntime.InferenceSession(onnx_model, None)
        session.run(None, {'samples_0': np.zeros((1, 1, 28, 28), dtype=np.float32),
                           'labels_0': np.zeros((1, 10), dtype=np.float32)})

    # Preserve the wrapped test's name so PyTest reports it correctly.
    func.__name__ = test_name
    return func
# Create test functions that can interact with PyTest
# Create test functions that can interact with PyTest: each generated test is
# registered at module scope (under its own name) so pytest can collect it.
for _test_func in tools.create_tests(setup_experiment,
                                     __file__,
                                     nodes=num_nodes):
    globals()[_test_func.__name__] = augment_test_func(_test_func)
| StarcoderdataPython |
4826559 | <reponame>lokesh-lraj/30-Day-LeetCoding-Challenge-april_2020
"""
Given a binary tree where each path going from the root to any leaf forms a valid sequence, check if a given string is a valid sequence in such a binary tree.
We get the given string from the concatenation of an array of integers arr and the concatenation of all values of the nodes along a path results in a sequence in the given binary tree.
Example 1:
Input: root = [0,1,0,0,1,0,null,null,1,0,0], arr = [0,1,0,1]
Output: true
Explanation:
The path 0 -> 1 -> 0 -> 1 is a valid sequence (green color in the figure).
Other valid sequences are:
0 -> 1 -> 1 -> 0
0 -> 0 -> 0
Input: root = [0,1,0,0,1,0,null,null,1,0,0], arr = [0,0,1]
Output: false
Explanation: The path 0 -> 0 -> 1 does not exist, therefore it is not even a sequence.
Input: root = [0,1,0,0,1,0,null,null,1,0,0], arr = [0,1,1]
Output: false
Explanation: The path 0 -> 1 -> 1 is a sequence, but it is not a valid sequence.
Constraints:
1 <= arr.length <= 5000
0 <= arr[i] <= 9
Each node's value is between [0 - 9].
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """Checks whether an integer array spells out a root-to-leaf path."""

    def isValidSequence(self, root: "TreeNode", arr: "List[int]") -> bool:
        """Return True iff arr matches the node values of some root-to-leaf path."""
        return self.checkpath(root, arr, 0)

    def checkpath(self, root: "TreeNode", arr: "List[int]", indx: int) -> bool:
        """Recursively match arr[indx:] against the subtree rooted at `root`.

        Bug fix: the original final line read ``a and b or c``; by operator
        precedence that is ``(a and b) or c``, so the right subtree was still
        explored at indx+1 even when the current node's value did not match
        arr[indx], and sequences with a wrong prefix could be reported valid.
        """
        # Ran off the tree or past the array: no match.
        if not root or indx == len(arr):
            return False
        # The current node must match the current array element.
        if root.val != arr[indx]:
            return False
        # At a leaf, the sequence is valid only if it ends exactly here.
        if root.left is None and root.right is None:
            return indx == len(arr) - 1
        # Otherwise continue down either child.
        return (self.checkpath(root.left, arr, indx + 1)
                or self.checkpath(root.right, arr, indx + 1))
| StarcoderdataPython |
3241271 | from bs4 import BeautifulSoup
from urllib2 import urlopen
from pprint import pprint
import re
def fetch_html (url, process_callback):
    """Download *url*, parse it with BeautifulSoup, and hand the soup to the callback."""
    page = urlopen(url).read()
    return process_callback(BeautifulSoup(page, 'html.parser'))
def enforce (condition, msg, *args):
    """Raise Exception(msg % args) when *condition* is falsy; otherwise do nothing."""
    if condition:
        return
    raise Exception(msg % args)
def process_registrar_page_content (url, callback):
    """Fetch a registrar page, validate its layout, and run *callback* on the
    <div class='content'> element that wraps the #top anchor."""
    def validate_and_run(soup):
        top = soup.find(id='top')
        enforce(top, "Could not find 'top' element in page at '%s':%s",
                url, soup.prettify())
        # The content wrapper is expected two levels above the anchor.
        content = top.parent.parent
        enforce('content' in content['class'],
                "Expected #top to be nested within <div class='content'><p><a id='top' /><p>...</div>, not\n%s",
                content.prettify() if content else '', soup.prettify())
        return callback(content)
    return fetch_html(url, validate_and_run)
def filterMapRegex (items, regex, groups = (1,)):
    """Yield the requested match groups for every item matching *regex*.

    Bug fix: the default was written as ``(1)``, which is just the int 1,
    so ``match.group(*groups)`` raised TypeError whenever the default was
    used; it is now the one-element tuple ``(1,)``.

    Yields a single string when one group index is given, a tuple otherwise.
    """
    for item in items:
        match = re.match(regex, item)
        if match:
            yield match.group(*groups)
def process_registrar_course_page (dept):
    """Return a BeautifulSoup-content processor that parses a department's
    course-catalog page into a dict of {"<DEPT> <number>": course-info}."""
    dept = dept.upper()
    prefix = dept + ' '
    courses = {}
    def parse_course (name, text):
        # Catalog entries are dot-separated: "Title. TERMS description. Instructor."
        items = text.split('.')
        courses[name] = { 'dept': dept }
        if len(items) > 0:
            courses[name]['title'] = items[0]
            items = items[1:]
            if len(items) > 0:
                # Terms look like "F", "F,W,S" or "*" at the start of the
                # remainder — presumably quarter codes; confirm format.
                match = re.match(r'\s*([FWS](?:,[FWS])*|\*)\s+', items[0])
                enforce(match, "Could not match terms in '%s'", items[0])
                courses[name]['terms'] = match.group(1).replace(',','')
            # Last dot-separated piece is the instructor; the middle pieces
            # (if any) are rejoined into the description.
            courses[name]['instructor'] = items[-1]
            items = items[:-1]
            if len(items) > 0:
                courses[name]['description'] = '.'.join(items)
    def process (content):
        text = content.text
        # Move closing parens/quotes before the period so the dot-split works.
        text = re.sub(r'\.([\)"]+)', r'\1.', text)
        # Course lines look like "<number><optional letter>. <rest>".
        items = filterMapRegex(text.split('\n'),
                               r'(\d+[A-Z]?)\.\s+([^\n]+)', (1, 2))
        for courseId, rest in items:
            parse_course(prefix + courseId, rest)
        return courses
    return process
# Ad-hoc smoke test: scrape the live UCSC math course catalog and print it.
if __name__ == '__main__':
    result = process_registrar_page_content(
        'https://registrar.ucsc.edu/catalog/archive/17-18/programs-courses/course-descriptions/math.html',
        process_registrar_course_page('math'))
    pprint(result)
| StarcoderdataPython |
1646274 | #!/usr/bin/env python
# NOTE: this is Python 2 code (raw_input and the print statement); it will
# not run under Python 3 without conversion.
kmh = int(raw_input("Enter km/h: "))  # speed in kilometres per hour
mph = 0.6214 * kmh  # 1 km/h is approximately 0.6214 mph
print "Speed:", kmh, "KM/H = ", mph, "MPH"
| StarcoderdataPython |
3388073 | <gh_stars>0
import numpy as np
import gym
class State:
    """Tracks current/previous observations from a gym environment and
    discretizes ("bins") Box observations onto an integer grid.

    For Box spaces each dimension is mapped onto roughly 101 bins between
    its low and high bounds; Discrete observations are passed through
    unchanged.
    """

    def __init__(self, state_space):
        # Observation history, oldest first.  next() pushes the old
        # `current` (initially None) before replacing it.
        self.past_states = []
        self.current, self.previous = None, None
        if isinstance(state_space, gym.spaces.box.Box):
            self.low = state_space.low
            self.high = state_space.high
            # Per-dimension bin width: the nested max/min clamp the span
            # (high - low) into [-1e8, 1e8] elementwise — keeping unbounded
            # (inf) dimensions finite — before dividing it into 100 steps.
            self.den = np.min([1E8 + self.high * 0, np.max([-1E8 + self.high * 0, (self.high - self.low)], axis=0)],
                              axis=0) / np.array([100] * len(self.high))
            self.cardinality = np.round(self.high * 0 + 100, 0).astype(int) + 1
            self.bin = True
        if isinstance(state_space, gym.spaces.discrete.Discrete):
            self.cardinality = [state_space.n]
            self.low = 0
            self.high = state_space.n - 1
            self.bin = False

    def _bin(self, state):
        """Shared binning helper: map *state* onto the integer grid
        (identity for Discrete spaces).  Fix: bin_it/bin_previous previously
        duplicated this logic."""
        if not self.bin:
            return state
        return np.round((state - self.low) / self.den, 0).astype(int)

    def bin_it(self):
        """Binned view of the current observation."""
        return self._bin(self.current)

    def bin_previous(self):
        """Binned view of the previous observation."""
        return self._bin(self.previous)

    def next(self, state):
        """Record a new observation, shifting current into previous/history."""
        self.past_states.append(self.current)
        self.previous = self.current
        self.current = state
| StarcoderdataPython |
1634162 | <filename>cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_bundlemgr_cfg.py
""" Cisco_IOS_XR_bundlemgr_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR bundlemgr package configuration.
This module contains definitions
for the following management objects\:
lacp\: Link Aggregation Control Protocol commands
This YANG module augments the
Cisco\-IOS\-XR\-ifmgr\-cfg,
Cisco\-IOS\-XR\-rgmgr\-cfg,
modules with configuration data.
Copyright (c) 2013\-2017 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
# NOTE(review): appears to be ydk-gen auto-generated binding code; prefer
# regenerating from the YANG model over hand edits.
class BfdMode(Enum):
    """
    BfdMode (Enum Class): BFD mode.

    .. data:: no_cfg = 0    BFD mode not configured on per-bundle basis
    .. data:: cisco = 1     BFD mode Cisco
    .. data:: ietf = 2      BFD mode IETF
    """

    no_cfg = Enum.YLeaf(0, "no-cfg")

    cisco = Enum.YLeaf(1, "cisco")

    ietf = Enum.YLeaf(2, "ietf")
# NOTE(review): appears to be ydk-gen auto-generated binding code; prefer
# regenerating from the YANG model over hand edits.
class BundleCiscoExtTypes(Enum):
    """
    BundleCiscoExtTypes (Enum Class): Bundle cisco ext types.

    .. data:: lon_signaling_off = 0    LON signaling disabled
    .. data:: lon_signaling_on = 1     LON signaling enabled
    """

    lon_signaling_off = Enum.YLeaf(0, "lon-signaling-off")

    lon_signaling_on = Enum.YLeaf(1, "lon-signaling-on")
# NOTE(review): appears to be ydk-gen auto-generated binding code; prefer
# regenerating from the YANG model over hand edits.
class BundleLoadBalance(Enum):
    """
    BundleLoadBalance (Enum Class): Bundle load balance.

    .. data:: default = 0         Default hash function used
    .. data:: efp_auto = 1        Send all traffic for this EFP over an
                                  automatically selected member
    .. data:: efp_value = 2       Send all traffic for this EFP over the member
                                  corresponding to the specified hash function
    .. data:: source_ip = 3       Load balance according to source IP address
    .. data:: destination_ip = 4  Load balance according to detination IP address
    """

    default = Enum.YLeaf(0, "default")

    efp_auto = Enum.YLeaf(1, "efp-auto")

    efp_value = Enum.YLeaf(2, "efp-value")

    source_ip = Enum.YLeaf(3, "source-ip")

    destination_ip = Enum.YLeaf(4, "destination-ip")
# NOTE(review): appears to be ydk-gen auto-generated binding code; prefer
# regenerating from the YANG model over hand edits.
class BundleMaximumActiveLinksMode(Enum):
    """
    BundleMaximumActiveLinksMode (Enum Class): Bundle maximum active links mode.

    .. data:: default = 0      Default
    .. data:: hot_standby = 1  Hot standby
    """

    default = Enum.YLeaf(0, "default")

    hot_standby = Enum.YLeaf(1, "hot-standby")
# NOTE(review): appears to be ydk-gen auto-generated binding code; prefer
# regenerating from the YANG model over hand edits.
class BundleMinimumBandwidthRange(Enum):
    """
    BundleMinimumBandwidthRange (Enum Class): Bundle minimum bandwidth range.

    .. data:: none = 0  None
    .. data:: kbps = 1  kbps
    .. data:: mbps = 2  mbps
    .. data:: gbps = 3  gbps
    """

    none = Enum.YLeaf(0, "none")

    kbps = Enum.YLeaf(1, "kbps")

    mbps = Enum.YLeaf(2, "mbps")

    gbps = Enum.YLeaf(3, "gbps")
# NOTE(review): appears to be ydk-gen auto-generated binding code; prefer
# regenerating from the YANG model over hand edits.
class BundleMode(Enum):
    """
    BundleMode (Enum Class): Bundle mode.

    .. data:: on = 0       On
    .. data:: active = 1   Active
    .. data:: passive = 2  Passive
    """

    on = Enum.YLeaf(0, "on")

    active = Enum.YLeaf(1, "active")

    passive = Enum.YLeaf(2, "passive")
# NOTE(review): appears to be ydk-gen auto-generated binding code; prefer
# regenerating from the YANG model over hand edits. This enum has the same
# single member as PeriodShortEnum elsewhere in this module.
class BundlePeriod(Enum):
    """
    BundlePeriod (Enum Class): Bundle period.

    .. data:: true = 1  Use the standard LACP short period (1s)
    """

    true = Enum.YLeaf(1, "true")
# NOTE(review): appears to be ydk-gen auto-generated binding code; prefer
# regenerating from the YANG model over hand edits.
class BundlePortActivity(Enum):
    """
    BundlePortActivity (Enum Class): Bundle port activity.

    .. data:: on = 1       On
    .. data:: active = 2   Active
    .. data:: passive = 3  Passive
    .. data:: inherit = 4  Inherit
    """

    on = Enum.YLeaf(1, "on")

    active = Enum.YLeaf(2, "active")

    passive = Enum.YLeaf(3, "passive")

    inherit = Enum.YLeaf(4, "inherit")
# NOTE(review): appears to be ydk-gen auto-generated binding code; prefer
# regenerating from the YANG model over hand edits.
class ChurnLogging(Enum):
    """
    ChurnLogging (Enum Class): Churn logging.

    .. data:: actor = 1    Logging for actor churn only
    .. data:: partner = 2  Logging for partner churn only
    .. data:: both = 3     Logging for actor and partner churn
    """

    actor = Enum.YLeaf(1, "actor")

    partner = Enum.YLeaf(2, "partner")

    both = Enum.YLeaf(3, "both")
# NOTE(review): appears to be ydk-gen auto-generated binding code; prefer
# regenerating from the YANG model over hand edits.
class MlacpMaximizeParameter(Enum):
    """
    MlacpMaximizeParameter (Enum Class): Mlacp maximize parameter.

    .. data:: links = 1      Maximize the number of operational links
    .. data:: bandwidth = 2  Maximize the operational bandwidth
    """

    links = Enum.YLeaf(1, "links")

    bandwidth = Enum.YLeaf(2, "bandwidth")
# NOTE(review): appears to be ydk-gen auto-generated binding code; prefer
# regenerating from the YANG model over hand edits.
class MlacpSwitchover(Enum):
    """
    MlacpSwitchover (Enum Class): Mlacp switchover.

    .. data:: brute_force = 1  Brute force shutdown
    .. data:: revertive = 2    Revertive behavior
    """

    brute_force = Enum.YLeaf(1, "brute-force")

    revertive = Enum.YLeaf(2, "revertive")
# NOTE(review): appears to be ydk-gen auto-generated binding code; prefer
# regenerating from the YANG model over hand edits. This enum has the same
# single member as BundlePeriod elsewhere in this module.
class PeriodShortEnum(Enum):
    """
    PeriodShortEnum (Enum Class): Period short enum.

    .. data:: true = 1  Use the standard LACP short period (1s)
    """

    true = Enum.YLeaf(1, "true")
# NOTE(review): appears to be ydk-gen auto-generated binding code; prefer
# regenerating from the YANG model over hand edits.
class Lacp(Entity):
    """
    Link Aggregation Control Protocol commands

    .. attribute:: system_mac
    	Unique identifier for this system
    	**type**\: str
    	**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}

    .. attribute:: system_priority
    	Priority for this system. Lower value is higher priority
    	**type**\: int
    	**range:** 1..65535
    	**default value**\: 32768
    """

    _prefix = 'bundlemgr-cfg'
    _revision = '2017-05-01'

    def __init__(self):
        super(Lacp, self).__init__()
        self._top_entity = None

        self.yang_name = "lacp"
        self.yang_parent_name = "Cisco-IOS-XR-bundlemgr-cfg"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([])
        # Declared YANG leafs and their types; the matching Python attributes
        # are initialized to None below.
        self._leafs = OrderedDict([
            ('system_mac', YLeaf(YType.str, 'system-mac')),
            ('system_priority', YLeaf(YType.uint32, 'system-priority')),
        ])
        self.system_mac = None
        self.system_priority = None
        self._segment_path = lambda: "Cisco-IOS-XR-bundlemgr-cfg:lacp"

    def __setattr__(self, name, value):
        # Route writes through ydk's Entity hook, restricting settable leafs
        # to the declared ones.
        self._perform_setattr(Lacp, ['system_mac', 'system_priority'], name, value)

    def clone_ptr(self):
        # NOTE(review): presumably invoked by ydk services to obtain a fresh
        # top-level entity — confirm against ydk Entity API.
        self._top_entity = Lacp()
        return self._top_entity
| StarcoderdataPython |
1656269 | <filename>datahoarder/web.py
import os
from flask import Flask, jsonify, request, send_from_directory
from flask_cors import CORS
from datahoarder.source import *
from datahoarder.download import get_download_status
from datahoarder.run import sync
app = Flask(__name__)
CORS(app)  # enable cross-origin requests on all routes

# Disable Flask logging
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)

# Find UI path
# NOTE(review): assumes the built UI lives in a sibling 'datahoarder-ui/dist'
# directory next to this package — TODO confirm deployment layout.
ui_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
ui_path += os.path.sep + 'datahoarder-ui' + os.path.sep + 'dist'
class WebThread(threading.Thread):
    """Daemon thread that serves the Flask app on 0.0.0.0:4040."""

    def __init__(self):
        threading.Thread.__init__(self, name='WebThread', daemon=True)

    def run(self):
        """Thread body: block forever serving HTTP requests."""
        app.run(host='0.0.0.0', port=4040)
def error_response(error, message):
    """Build a (json, 500) response carrying an error code and message."""
    payload = {"error": error, "error_msg": message}
    return jsonify(payload), 500
@app.route('/')
def index():
    """Root endpoint: plain-text greeting."""
    return 'Hello world!'
@app.route('/ui', methods=['GET'])
def serve_dir_directory_index():
    """Serve the UI entry point (index.html) from the built UI directory."""
    return send_from_directory(ui_path, 'index.html')
@app.route('/ui/<path:path>', methods=['GET'])
def serve_file_in_dir(path):
    """Serve a static UI asset; fall back to the directory's index.html."""
    requested = os.path.join(ui_path, path)
    if not os.path.isfile(requested):
        path = os.path.join(path, 'index.html')
    return send_from_directory(ui_path, path)
@app.route('/api/')
def ping():
    """Health-check endpoint: responds with an empty JSON object."""
    return jsonify({})
@app.route('/api/get-active-sources')
def active_sources():
    """Return the currently configured sources as JSON."""
    sources = get_active_sources()
    return jsonify(sources)
@app.route('/api/get-available-sources')
def available_sources():
    """Return all sources that can be added, as JSON."""
    sources = get_available_sources()
    return jsonify(sources)
@app.route('/api/add-source')
def add_source():
    """Register a new source from query parameters.

    Expects ``source`` (the source type) and optionally repeated ``args[]``
    entries, each a JSON object with ``name`` and ``value``. When no args are
    supplied, falls back to the source's declared defaults ("quick add").
    Returns ``{'status': 'OK', 'source_uid': ...}``.
    """
    # BUG FIX: the old code used request.args['source'], which raises KeyError
    # before the `source is None` check could ever run; .get() makes the
    # NO_SOURCE_DEFINED error response reachable.
    source = request.args.get('source')
    if source is None:
        return error_response('NO_SOURCE_DEFINED', 'You must define a source to add one.')

    args = {}
    for raw in request.args.getlist('args[]'):
        parsed = json.loads(raw)
        args[parsed['name']] = parsed['value']

    # Quick add: no explicit arguments supplied, so use the declared defaults.
    # BUG FIX: the old check was `if args is None`, which was never true since
    # args was initialized to {}, so the quick-add path was dead code.
    if not args:
        source_args = get_source_metadata(source)['args']
        args = {arg['name']: arg['default'] for arg in source_args}

    # Add to config
    source_uid = new_source(source, args)
    return jsonify({'status': 'OK', 'source_uid': source_uid})
@app.route('/api/remove-source')
def delete_source():
    """Remove the source identified by the ``source_id`` query parameter."""
    source_id = request.args.get('source_id')
    removed = Source(source_id).remove()
    if not removed:
        return error_response('SOURCE_NOT_REMOVED', 'An error happened when removing the source.')
    return jsonify({'status': 'OK'})
@app.route('/api/download-status')
def download_status():
    """Return up to the first ten download status entries as JSON.

    BUG FIX: the old guard was ``get_download_status() is {}``, which is
    always False (``is`` compares identity, and a dict literal is a fresh
    object), and its branch returned a bare ``{}`` instead of a Response.
    Use a truthiness check and jsonify in both branches.
    """
    status = get_download_status()
    if not status:
        return jsonify([])
    return jsonify(status[:10])
@app.route('/api/sync')
def sync_now():
    """Trigger an immediate synchronization run and acknowledge with OK."""
    sync()
    return jsonify({'status': 'OK'})
@app.route('/api/test')
def test():
    """Debug endpoint: exercise the source listing and return a fixed string."""
    get_available_sources()
    return 'hello'
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.