hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
def0136d516a6937de9081dfdf535fe855617e80 | 2,303 | py | Python | python3/koans/about_sets.py | pipe2705/python-koans | 7185c8cf4d8b3aa09d1107c1cb471c4609ec8fe3 | [
"MIT"
] | null | null | null | python3/koans/about_sets.py | pipe2705/python-koans | 7185c8cf4d8b3aa09d1107c1cb471c4609ec8fe3 | [
"MIT"
] | null | null | null | python3/koans/about_sets.py | pipe2705/python-koans | 7185c8cf4d8b3aa09d1107c1cb471c4609ec8fe3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutSets(Koan):
    """Koans exploring Python's built-in set type."""

    def test_sets_make_keep_lists_unique(self):
        highlanders = [
            'MacLeod', 'Ramirez', 'MacLeod', 'Matunas',
            'MacLeod', 'Malcolm', 'MacLeod',
        ]
        there_can_only_be_only_one = set(highlanders)
        expected = {'MacLeod', 'Ramirez', 'Matunas', 'Malcolm', 'MacLeod'}
        self.assertEqual(expected, there_can_only_be_only_one)

    def test_empty_sets_have_different_syntax_to_populated_sets(self):
        self.assertEqual({1, 2, 3}, {1, 2, 3})
        # {} is an empty dict, so the empty set must be written set().
        self.assertEqual(set(), set())

    def test_dictionaries_and_sets_use_same_curly_braces(self):
        # Note: Literal sets using braces were introduced in python 3.
        # They were also backported to python 2.7.
        self.assertEqual(set, {1, 2, 3}.__class__)
        self.assertEqual(dict, {'one': 1, 'two': 2}.__class__)
        self.assertEqual(dict, {}.__class__)

    def test_creating_sets_using_strings(self):
        self.assertEqual({'12345'}, {'12345'})
        self.assertEqual({'1', '2', '3', '4', '5'}, set('12345'))

    def test_convert_the_set_into_a_list_to_sort_it(self):
        self.assertEqual(['1', '2', '3', '4', '5'], sorted(set('12345')))

    # ------------------------------------------------------------------

    def test_set_have_arithmetic_operators(self):
        scotsmen = {'MacLeod', 'Wallace', 'Willie'}
        warriors = {'MacLeod', 'Wallace', 'Leonidas'}

        self.assertEqual({'Willie'}, scotsmen - warriors)
        union = {'MacLeod', 'Leonidas', 'Willie', 'Wallace'}
        self.assertEqual(union, scotsmen | warriors)
        self.assertEqual({'MacLeod', 'Wallace'}, scotsmen & warriors)
        self.assertEqual({'Willie', 'Leonidas'}, scotsmen ^ warriors)

    # ------------------------------------------------------------------

    def test_we_can_query_set_membership(self):
        self.assertEqual(True, 127 in {127, 0, 0, 1})
        self.assertEqual(True, 'cow' not in set('apocalypse now'))

    def test_we_can_compare_subsets(self):
        self.assertEqual(True, set('cake') <= set('cherry cake'))
        self.assertEqual(True, set('cake').issubset(set('cherry cake')))
        self.assertEqual(False, set('cake') > set('pie'))
| 38.383333 | 76 | 0.580981 |
68bdc02b1644cc6b4183d811262f218d2e4cdaa0 | 3,746 | py | Python | pulsar/managers/__init__.py | nuwang/pulsar | ccebb1563c8f8eaca0505ed168920d8121bd8a1e | [
"Apache-2.0"
] | null | null | null | pulsar/managers/__init__.py | nuwang/pulsar | ccebb1563c8f8eaca0505ed168920d8121bd8a1e | [
"Apache-2.0"
] | null | null | null | pulsar/managers/__init__.py | nuwang/pulsar | ccebb1563c8f8eaca0505ed168920d8121bd8a1e | [
"Apache-2.0"
] | null | null | null | """
"""
from abc import ABCMeta, abstractmethod
# Sentinel returned by ``return_code`` implementations when a job's exit
# status cannot be determined.
PULSAR_UNKNOWN_RETURN_CODE = '__unknown__'
class ManagerInterface(object, metaclass=ABCMeta):
    """
    Defines the interface to various job managers.

    Concrete managers must implement every abstract method below.  The
    ``metaclass=ABCMeta`` declaration makes that enforceable at
    instantiation time: the previous ``__metaclass__ = ABCMeta`` class
    attribute is a Python 2 idiom that Python 3 silently ignores, so
    ``@abstractmethod`` had no effect and incomplete managers could be
    instantiated.
    """

    @abstractmethod
    def setup_job(self, input_job_id, tool_id, tool_version):
        """
        Setup a job directory for specified input (galaxy) job id, tool id,
        and tool version.
        """

    @abstractmethod
    def clean(self, job_id):
        """
        Delete job directory and clean up resources associated with job with
        id `job_id`.
        """

    # NOTE: the mutable defaults (``{}``/``[]``) are preserved for API
    # compatibility; implementations must not mutate them in place.
    @abstractmethod
    def launch(self, job_id, command_line, submit_params={}, dependencies_description=None, env=[], setup_params=None):
        """
        Called to indicate that the client is ready for this job with specified
        job id and command line to be executed (i.e. run or queue this job
        depending on implementation).
        """

    @abstractmethod
    def get_status(self, job_id):
        """
        Return status of job as string, currently supported statuses include
        'cancelled', 'running', 'queued', and 'complete'.
        """

    @abstractmethod
    def return_code(self, job_id):
        """
        Return integer indicating return code of specified execution or
        PULSAR_UNKNOWN_RETURN_CODE.
        """

    @abstractmethod
    def stdout_contents(self, job_id):
        """
        After completion, return contents of stdout associated with specified
        job.
        """

    @abstractmethod
    def stderr_contents(self, job_id):
        """
        After completion, return contents of stderr associated with specified
        job.
        """

    @abstractmethod
    def kill(self, job_id):
        """
        End or cancel execution of the specified job.
        """

    @abstractmethod
    def job_directory(self, job_id):
        """ Return a JobDirectory abstraction describing the state of the
        job working directory.
        """
class ManagerProxy(object):
    """
    Base class for wrappers around a manager: every operation is
    forwarded to the wrapped instance, so subclasses only override the
    specific pieces of behavior they need to change.
    """

    def __init__(self, manager):
        self._proxied_manager = manager

    def _delegate(self, method_name, args, kwargs):
        # Resolve the target method at call time so overrides on the
        # proxied manager are always honored.
        target = getattr(self._proxied_manager, method_name)
        return target(*args, **kwargs)

    def setup_job(self, *args, **kwargs):
        return self._delegate('setup_job', args, kwargs)

    def clean(self, *args, **kwargs):
        return self._delegate('clean', args, kwargs)

    def launch(self, *args, **kwargs):
        return self._delegate('launch', args, kwargs)

    def get_status(self, *args, **kwargs):
        return self._delegate('get_status', args, kwargs)

    def return_code(self, *args, **kwargs):
        return self._delegate('return_code', args, kwargs)

    def stdout_contents(self, *args, **kwargs):
        return self._delegate('stdout_contents', args, kwargs)

    def stderr_contents(self, *args, **kwargs):
        return self._delegate('stderr_contents', args, kwargs)

    def kill(self, *args, **kwargs):
        return self._delegate('kill', args, kwargs)

    def enable_metadata_directory(self, *args, **kwargs):
        return self._delegate('enable_metadata_directory', args, kwargs)

    def shutdown(self, timeout=None):
        """ Optional. """
        # Managers without a shutdown hook are silently tolerated.
        try:
            shutdown_method = self._proxied_manager.shutdown
        except AttributeError:
            return
        shutdown_method(timeout)

    def job_directory(self, *args, **kwargs):
        return self._delegate('job_directory', args, kwargs)

    def system_properties(self):
        return self._proxied_manager.system_properties()

    def __str__(self):
        return "ManagerProxy[manager=%s]" % str(self._proxied_manager)
| 29.03876 | 119 | 0.643887 |
7664573020130500360984f633b429aea818bf3d | 3,494 | py | Python | torchmd/forcefields/ff_yaml.py | PhilippThoelke/torchmd | adcb1e12f4bf064298b6b91a694baf900b8d436a | [
"MIT"
] | 224 | 2020-10-08T13:52:27.000Z | 2022-03-18T01:07:05.000Z | torchmd/forcefields/ff_yaml.py | PhilippThoelke/torchmd | adcb1e12f4bf064298b6b91a694baf900b8d436a | [
"MIT"
] | 7 | 2020-10-22T13:41:03.000Z | 2022-01-07T09:12:42.000Z | torchmd/forcefields/ff_yaml.py | PhilippThoelke/torchmd | adcb1e12f4bf064298b6b91a694baf900b8d436a | [
"MIT"
] | 38 | 2020-12-24T00:01:03.000Z | 2022-03-08T11:57:02.000Z | from torchmd.forcefields.forcefield import _ForceFieldBase
from math import radians
import numpy as np
import yaml
class YamlForcefield(_ForceFieldBase):
    """Force-field parameter source backed by a YAML parameter file."""

    def __init__(self, mol, prm):
        self.mol = mol
        # Fix: use a context manager so the parameter file handle is closed
        # deterministically instead of leaking until garbage collection
        # (the previous ``yaml.load(open(prm), ...)`` never closed it).
        with open(prm) as prm_file:
            self.prm = yaml.load(prm_file, Loader=yaml.FullLoader)

    def _get_x_variants(self, atomtypes):
        """Return copies of ``atomtypes`` with every subset of positions
        replaced by the wildcard "X", ordered from most specific (no
        wildcards) to least specific (all wildcards)."""
        from itertools import product

        permutations = np.array(
            sorted(
                list(product([False, True], repeat=len(atomtypes))),
                key=lambda x: sum(x),
            )
        )
        variants = []
        for per in permutations:
            tmpat = atomtypes.copy()
            tmpat[per] = "X"  # boolean mask selects positions to wildcard
            variants.append(tmpat)
        return variants

    def get_parameters(self, term, atomtypes):
        """Look up the parameter entry for ``term`` matching ``atomtypes``,
        trying exact matches before wildcarded ones.

        Raises RuntimeError when no (possibly wildcarded) match exists."""
        from itertools import permutations

        atomtypes = np.array(atomtypes)
        variants = self._get_x_variants(atomtypes)
        if term == "bonds" or term == "angles" or term == "dihedrals":
            # These terms are symmetric under reversal of the atom order.
            variants += self._get_x_variants(atomtypes[::-1])
        elif term == "impropers":
            # Position 2 is the improper center
            perms = np.array([x for x in list(permutations((0, 1, 2, 3))) if x[2] == 2])
            for perm in perms:
                variants += self._get_x_variants(atomtypes[perm])
        # Prefer the most specific match (fewest wildcards) overall.
        variants = sorted(variants, key=lambda x: sum(x == "X"))

        termpar = self.prm[term]
        for var in variants:
            atomtypestr = ", ".join(var)
            if len(var) > 1:
                atomtypestr = "(" + atomtypestr + ")"
            if atomtypestr in termpar:
                return termpar[atomtypestr]
        raise RuntimeError(f"{atomtypes} doesn't have {term} information in the FF")

    def get_atom_types(self):
        """Return the unique atom types declared in the parameter file."""
        return np.unique(self.prm["atomtypes"])

    def get_charge(self, at):
        """Return the partial charge of atom type ``at``."""
        params = self.get_parameters("electrostatics", [at,])
        return params["charge"]

    def get_mass(self, at):
        """Return the mass of atom type ``at``."""
        return self.prm["masses"][at]

    def get_LJ(self, at):
        """Return the Lennard-Jones (sigma, epsilon) pair of ``at``."""
        params = self.get_parameters("lj", [at,])
        return params["sigma"], params["epsilon"]

    def get_bond(self, at1, at2):
        """Return (k0, req) for the bond between ``at1`` and ``at2``."""
        params = self.get_parameters("bonds", [at1, at2])
        return params["k0"], params["req"]

    def get_angle(self, at1, at2, at3):
        """Return (k0, theta0 in radians) for the angle term."""
        params = self.get_parameters("angles", [at1, at2, at3])
        return params["k0"], radians(params["theta0"])

    def get_dihedral(self, at1, at2, at3, at4):
        """Return a list of [phi_k, phase (radians), periodicity] terms."""
        params = self.get_parameters("dihedrals", [at1, at2, at3, at4])
        terms = []
        for term in params["terms"]:
            terms.append([term["phi_k"], radians(term["phase"]), term["per"]])
        return terms

    def get_14(self, at1, at2, at3, at4):
        """Return 1-4 interaction data: (scnb, scee, sigma14/epsilon14 of
        the two terminal atoms); scnb/scee default to 1 when absent."""
        params = self.get_parameters("dihedrals", [at1, at2, at3, at4])
        # NOTE(review): ``terms`` is built but never returned here (it is
        # returned by get_dihedral) -- looks like dead code; kept so that
        # missing "terms" keys still fail the same way.  Confirm and remove.
        terms = []
        for term in params["terms"]:
            terms.append([term["phi_k"], radians(term["phase"]), term["per"]])
        lj1 = self.get_parameters("lj", [at1,])
        lj4 = self.get_parameters("lj", [at4,])
        return (
            params["scnb"] if "scnb" in params else 1,
            params["scee"] if "scee" in params else 1,
            lj1["sigma14"],
            lj1["epsilon14"],
            lj4["sigma14"],
            lj4["epsilon14"],
        )

    def get_improper(self, at1, at2, at3, at4):
        """Return (phi_k, phase in radians, periodicity) for the improper."""
        params = self.get_parameters("impropers", [at1, at2, at3, at4])
        return params["phi_k"], radians(params["phase"]), params["per"]
| 33.92233 | 88 | 0.568117 |
ef0497d231512d9ef3febda7ac5542dc41159534 | 69 | py | Python | app/database/api/test/__init__.py | space-logistics-org/spacenet | fd004437ed7b27dd6dc41a374e1dedfcea92e37d | [
"MIT"
] | 1 | 2022-02-17T18:01:41.000Z | 2022-02-17T18:01:41.000Z | app/database/api/test/__init__.py | space-logistics-org/spacenet | fd004437ed7b27dd6dc41a374e1dedfcea92e37d | [
"MIT"
] | 2 | 2021-06-19T19:41:15.000Z | 2021-07-21T17:07:48.000Z | app/database/api/test/__init__.py | space-logistics-org/spacenet | fd004437ed7b27dd6dc41a374e1dedfcea92e37d | [
"MIT"
] | 3 | 2021-06-16T16:31:12.000Z | 2022-02-17T18:02:57.000Z | """
This module defines tests for the database editor API routes.
""" | 23 | 61 | 0.73913 |
55ddc92e7db6e9d6d0609d89c725b0b8fb2fa932 | 23,912 | py | Python | flare/mff/mff.py | jonpvandermause/flare | 494e02395b250ae9052575e0e60aefb33bea1243 | [
"MIT"
] | null | null | null | flare/mff/mff.py | jonpvandermause/flare | 494e02395b250ae9052575e0e60aefb33bea1243 | [
"MIT"
] | null | null | null | flare/mff/mff.py | jonpvandermause/flare | 494e02395b250ae9052575e0e60aefb33bea1243 | [
"MIT"
] | null | null | null | import time
from math import exp
import numpy as np
from numba import njit
from scipy.linalg import solve_triangular
import multiprocessing as mp
import sys
sys.path.append('../../flare/')
from memory_profiler import profile
import flare.gp as gp
import flare.env as env
from flare.kernels import two_body, three_body, two_plus_three_body, two_body_jit
import flare.struc as struc
import flare.mff.utils as utils
from flare.mff.splines_methods import PCASplines, SplinesInterpolation
class MappedForceField:
    # Wraps a trained Gaussian process (GP) with spline maps of its 2-body
    # and/or 3-body kernels so force prediction avoids full GP evaluation.

    def __init__(self, GP, grid_params, struc_params):
        '''
        param: struc_params = {'species': 'C', 'cube_lat': 2*1.763391008}
        param: grid_params = {'grid_num': list, 'bounds': list,
                    'svd_rank': int>0, 'load_grid': None,
                    'load_svd': None}

        grid_params['bodies'] selects which maps are built: '2', '3' or
        '2+3' (the latter builds both a 2-body and a 3-body map).
        '''
        self.GP = GP
        self.grid_params = grid_params
        self.struc_params = struc_params
        self.bodies = str(grid_params['bodies'])
        self.grid_num_2 = grid_params['grid_num_2']
        self.bounds_2 = grid_params['bounds_2']
        self.grid_num_3 = grid_params['grid_num_3']
        self.bounds_3 = grid_params['bounds_3']
        self.svd_rank_2 = grid_params['svd_rank_2']
        self.svd_rank_3 = grid_params['svd_rank_3']
        bond_struc = self.build_bond_struc(struc_params)
        # Maps can only be generated once the GP has training data.
        if len(GP.training_data) > 0:
            if self.bodies == '2':
                self.map = Map2body(self.grid_num_2, self.bounds_2, self.GP, bond_struc,
                                    self.bodies, grid_params['load_grid'], self.svd_rank_2)
            elif self.bodies == '3':
                self.map = Map3body(self.grid_num_3, self.bounds_3, self.GP, bond_struc,
                                    self.bodies, grid_params['load_grid'],
                                    grid_params['load_svd'], self.svd_rank_3)
            elif self.bodies == '2+3':
                # build_bond_struc returned a pair: [2-body struc, 3-body struc]
                self.map_2 = Map2body(self.grid_num_2, self.bounds_2, self.GP, bond_struc[0],
                                      self.bodies, grid_params['load_grid'], self.svd_rank_2)
                self.map_3 = Map3body(self.grid_num_3, self.bounds_3, self.GP,
                                      bond_struc[1], self.bodies, grid_params['load_grid'],
                                      grid_params['load_svd'], self.svd_rank_3)

    def build_bond_struc(self, struc_params):
        '''
        build a bond structure, used in grid generating

        Places 2 (resp. 3) atoms of the configured species evenly spaced
        within the smallest GP cutoff, and returns the structure for the
        selected bodies (or both for '2+3').
        '''
        cutoff = np.min(self.GP.cutoffs)
        cell = struc_params['cube_lat']
        mass_dict = struc_params['mass_dict']
        bond_struc = []
        for bodies in [2, 3]:
            species = [struc_params['species'] for i in range(bodies)]
            positions = [[(i+1)/(bodies+1)*cutoff, 0, 0] \
                         for i in range(bodies)]
            bond_struc.append(struc.Structure(cell, species, positions, mass_dict))
        if self.bodies == '2':
            return bond_struc[0]
        elif self.bodies == '3':
            return bond_struc[1]
        elif self.bodies == '2+3':
            return bond_struc

    def predict(self, atom_env, mean_only=False):
        # Returns (force, variance) for the atom at the center of atom_env.
        if self.bodies == '2':
            f, v = self.map.predict(atom_env, self.GP, mean_only)
        elif self.bodies == '3':
            f, v = self.map.predict(atom_env, self.GP, mean_only)
        elif self.bodies == '2+3':
            # In '2+3' mode each sub-map returns (force, self kernel,
            # projected variance vector); combine them here.
            f2, kern2, v2 = self.map_2.predict(atom_env, self.GP, mean_only)
            f3, kern3, v3 = self.map_3.predict(atom_env, self.GP, mean_only)
            f = f2 + f3
            # Predictive variance = total self kernel minus explained part.
            v = kern2 + kern3 - np.sum((v2 + v3)**2, axis=0)
        return f, v
class Map2body:
    # Spline-based map of the GP's 2-body contribution: mean and variance
    # are tabulated on a 1-d grid of bond lengths, then interpolated at
    # prediction time.

    def __init__(self, grid_num, bounds, GP, bond_struc, bodies='2', load_prefix=None, svd_rank=0):
        '''
        param grids: the 1st element is the number of grids for mean prediction,
        the 2nd is for var
        '''
        self.grid_num = grid_num
        self.l_bound, self.u_bound = bounds
        self.cutoffs = GP.cutoffs
        self.bodies = bodies
        self.svd_rank = svd_rank

        # Pure 2-body maps store the full grid covariance; combined 2+3
        # maps store SVD-compressed L^{-1} k* vectors instead.
        if self.bodies == '2':
            y_mean, y_var = self.GenGrid(GP, bond_struc)
        elif self.bodies == '2+3':
            y_mean, y_var = self.GenGrid_svd(GP, bond_struc)
        self.build_map(y_mean, y_var)

    def GenGrid(self, GP, bond_struc, processes=mp.cpu_count()):
        '''
        generate grid data of mean prediction and L^{-1}k* for each triplet
        default implemented in a parallelized style
        '''
        # NOTE(review): the ``processes`` argument is immediately
        # overwritten here, so callers cannot actually control pool size.
        processes = mp.cpu_count()
        nop = self.grid_num
        bond_lengths = np.linspace(self.l_bound, self.u_bound, nop)
        bond_means = np.zeros([nop])
        bond_vars = np.zeros([nop, nop])
        env1 = env.AtomicEnvironment(bond_struc, 0, self.cutoffs)
        env2 = env.AtomicEnvironment(bond_struc, 0, self.cutoffs)

        # One task per grid point; each result is tagged with its index so
        # the (unordered) pool results can be re-sorted afterwards.
        pool_list = [(i, bond_lengths[i], bond_lengths, GP, env1, env2) for i in range(nop)]
        pool = mp.Pool(processes=processes)
        A_list = pool.starmap(self._GenGrid_inner, pool_list)
        pool.close()
        pool.join()
        A_list.sort(key=lambda x: x[0])
        for b1 in range(nop):
            bond_means[b1] = A_list[b1][1]
            bond_vars[b1, :] = A_list[b1][2]
        return bond_means, bond_vars

    def _GenGrid_inner(self, b1, r1, bond_lengths, GP, env1, env2):
        '''
        generate grid for each angle, used to parallelize grid generation

        Computes the GP mean at bond length r1 and the covariance of r1
        against every grid bond length r2.
        '''
        nop = self.grid_num
        bond_vars = np.zeros(nop)
        # Synthetic single-bond environment along the x axis.
        bond1 = np.array([r1, 1.0, 0.0, 0.0])
        env1.bond_array_2 = np.array([bond1])
        # env1.cross_bond_dists = np.array([[0]])
        k1_v = GP.get_kernel_vector(env1, 1)
        v1_vec = solve_triangular(GP.l_mat, k1_v, lower=True)
        mean_diff = np.matmul(k1_v, GP.alpha)
        bond_means = mean_diff
        for b2, r2 in enumerate(bond_lengths):
            bond2 = np.array([r2, 1.0, 0.0, 0.0])
            env2.bond_array_2 = np.array([bond2])
            # env2.cross_bond_dists = np.array([[0]])
            k2_v = GP.get_kernel_vector(env2, 1)
            v2_vec = solve_triangular(GP.l_mat, k2_v, lower=True)
            self_kern = GP.kernel(env1, env2, 1, 1, GP.hyps, GP.cutoffs)
            # Predictive covariance: prior kernel minus explained part.
            var_diff = self_kern - np.matmul(v1_vec, v2_vec)
            bond_vars[b2] = var_diff
        return b1, bond_means, bond_vars

    @profile
    def GenGrid_svd(self, GP, bond_struc, processes=mp.cpu_count()):
        '''
        generate grid data of mean prediction and L^{-1}k* for each triplet
        implemented in a parallelized style
        '''
        # ------ change GP kernel to 2 body ------
        # NOTE(review): the GP object is mutated in place and restored at
        # the end; not safe if the GP is used concurrently elsewhere.
        GP.kernel = two_body
        original_cutoffs = np.copy(GP.cutoffs)
        GP.cutoffs = [GP.cutoffs[0]]
        original_hyps = np.copy(GP.hyps)
        GP.hyps = [GP.hyps[0], GP.hyps[1], GP.hyps[-1]]

        # ------ construct grids ------
        nop = self.grid_num
        bond_lengths = np.linspace(self.l_bound[0], self.u_bound[0], nop)
        bond_means = np.zeros([nop])
        bond_vars = np.zeros([nop, len(GP.alpha)])
        env12 = env.AtomicEnvironment(bond_struc, 0, self.cutoffs)

        pool_list = [(i, bond_lengths, GP, env12)\
                     for i in range(nop)]
        pool = mp.Pool(processes=processes)
        # pool.map preserves input order, so results index grid points.
        A_list = pool.map(self._GenGrid_svd_inner, pool_list)
        for p in range(nop):
            bond_means[p] = A_list[p][0]
            bond_vars[p, :] = A_list[p][1]
        pool.close()
        pool.join()

        # ------ change back original GP ------
        GP.cutoffs = original_cutoffs
        GP.hyps = original_hyps
        GP.kernel = two_plus_three_body

        return bond_means, bond_vars

    def _GenGrid_svd_inner(self, params):
        '''
        generate grid for each angle, used to parallelize grid generation

        Returns the GP mean and the L^{-1}k* vector at one grid bond length.
        '''
        b, bond_lengths, GP, env12 = params
        nop = self.grid_num
        r = bond_lengths[b]
        env12.bond_array_2 = np.array([[r, 1, 0, 0]])
        k12_v = GP.get_kernel_vector(env12, 1)
        v12_vec = solve_triangular(GP.l_mat, k12_v, lower=True)
        mean_diff = np.matmul(k12_v, GP.alpha)
        bond_means = mean_diff
        bond_vars = v12_vec

        return bond_means, bond_vars

    def build_map(self, y_mean, y_var):
        '''
        build 1-d spline function for mean, 2-d for var
        '''
        self.mean = SplinesInterpolation(y_mean,
                                         u_bounds=np.array(self.u_bound),
                                         l_bounds=np.array(self.l_bound),
                                         orders=np.array([self.grid_num]))

        if self.bodies == '2':
            # Full covariance grid -> 2-d spline over (r1, r2).
            self.var = SplinesInterpolation(y_var,
                                            u_bounds=np.array([self.u_bound, self.u_bound]),
                                            l_bounds=np.array([self.l_bound, self.l_bound]),
                                            orders=np.array([self.grid_num, self.grid_num]))
        elif self.bodies == '2+3':
            # Compressed L^{-1}k* vectors -> PCA splines with given rank.
            self.var = PCASplines(y_var, u_bounds=np.array(self.u_bound),
                                  l_bounds=np.array(self.l_bound),
                                  orders=np.array([self.grid_num]),
                                  svd_rank=self.svd_rank, load_svd=None)

    def predict(self, atom_env, GP, mean_only):
        '''
        predict for an atom environment
        param: atom_env: ChemicalEnvironment
        return force on an atom with its variance

        In '2' mode returns (force, variance); in '2+3' mode returns
        (force, self kernel, projected variance vector) for combination
        by the caller.  With mean_only the variance parts are zeros.
        '''
        bond_lengths = atom_env.bond_array_2[:,0]
        bond_dirs = atom_env.bond_array_2[:,1:]
        bond_num = len(bond_lengths)
        bond_lengths = np.expand_dims(bond_lengths, axis=1)
        mean_diffs = self.mean(bond_lengths)
        # Project scalar bond forces onto the three Cartesian directions.
        bond_forces = [mean_diffs*bond_dirs[:,i] for i in range(3)]
        atom_mean = np.sum(bond_forces, axis=1)
        atom_var = np.zeros(3)
        if not mean_only:
            if self.bodies == '2':
                # Evaluate the 2-d covariance spline on all bond pairs.
                ind_1, ind_2 = np.meshgrid(np.arange(bond_num), np.arange(bond_num))
                ind_1 = np.reshape(ind_1, (ind_1.shape[0]*ind_1.shape[1], 1))
                ind_2 = np.reshape(ind_2, (ind_2.shape[0]*ind_2.shape[1], 1))
                bond_1, bond_2 = (bond_lengths[ind_1], bond_lengths[ind_2])
                bond_xyz1 = bond_dirs[ind_1,:]
                bond_xyz2 = bond_dirs[ind_2,:]
                bond_concat = np.concatenate([bond_1, bond_2], axis=1)
                var_diffs = self.var(bond_concat)
                var_diffs = np.repeat(np.expand_dims(var_diffs, axis=1), 3, axis=1)
                atom_var = np.sum(var_diffs*bond_xyz1[:,0,:]*bond_xyz2[:,0,:], axis=0)
                return atom_mean, atom_var
            elif self.bodies == '2+3':
                sig_2, ls_2, sig_3, ls_3, noise = GP.hyps
                # Reconstruct the projected L^{-1}k* vector from PCA space.
                LambdaU = self.var(bond_lengths)
                VLambdaU = self.var.V @ LambdaU
                v = VLambdaU @ bond_dirs
                self_kern = np.zeros(3)
                for d in range(3):
                    self_kern[d] = self_two_body_jit(atom_env.bond_array_2, d+1,
                                                     sig_2, ls_2, GP.cutoffs[0], quadratic_cutoff)
                return atom_mean, self_kern, v
        else:
            if self.bodies == '2':
                return atom_mean, atom_var
            elif self.bodies == '2+3':
                return atom_mean, 0, 0
class Map3body:
    # Spline-based map of the GP's 3-body contribution: mean and L^{-1}k*
    # are tabulated on a (r1, r2, angle) grid and interpolated at
    # prediction time.

    def __init__(self, grid_num, bounds, GP, bond_struc, bodies='3',
                 load_grid=None, load_svd=None, svd_rank=0):
        '''
        param grids: the 1st element is the number of grids for mean prediction,
        the 2nd is for var
        '''
        self.grid_num = grid_num
        self.l_bound, self.u_bound = bounds
        self.cutoffs = GP.cutoffs
        self.bodies = bodies

        if not load_grid:
            y_mean, y_var = self.GenGrid(GP, bond_struc)
        else:
            # Bug fix: ``nop``/``noa`` were previously referenced here
            # before being defined anywhere, raising NameError whenever a
            # precomputed grid was loaded.  Derive them from grid_num the
            # same way GenGrid does.
            nop = self.grid_num[0]
            noa = self.grid_num[2]
            y_mean, y_var = utils.merge(load_grid, noa, nop)
        self.build_map(y_mean, y_var, svd_rank=svd_rank, load_svd=load_svd)

    @profile
    def GenGrid(self, GP, bond_struc, processes=mp.cpu_count()):
        '''
        generate grid data of mean prediction and L^{-1}k* for each triplet
        implemented in a parallelized style
        '''
        # NOTE(review): the GP object is mutated in place and restored at
        # the end; not safe if the GP is used concurrently elsewhere.
        original_hyps = np.copy(GP.hyps)
        if self.bodies == '2+3':
            # ------ change GP kernel to 3 body ------
            GP.kernel = three_body
            GP.hyps = [GP.hyps[2], GP.hyps[3], GP.hyps[-1]]

        # ------ construct grids ------
        nop = self.grid_num[0]
        noa = self.grid_num[2]
        bond_lengths = np.linspace(self.l_bound[0], self.u_bound[0], nop)
        angles = np.linspace(self.l_bound[2], self.u_bound[2], noa)
        bond_means = np.zeros([nop, nop, noa])
        bond_vars = np.zeros([nop, nop, noa, len(GP.alpha)])
        env12 = env.AtomicEnvironment(bond_struc, 0, self.cutoffs)

        # One task per angle; each task fills a full (r1, r2) slab.
        pool_list = [(i, angles[i], bond_lengths, GP, env12)\
                     for i in range(noa)]
        pool = mp.Pool(processes=processes)
        # pool.map preserves order, so results index the angle grid.
        A_list = pool.map(self._GenGrid_inner, pool_list)
        for a12 in range(noa):
            bond_means[:, :, a12] = A_list[a12][0]
            bond_vars[:, :, a12, :] = A_list[a12][1]
        pool.close()
        pool.join()

        # ------ change back to original GP ------
        if self.bodies == '2+3':
            GP.hyps = original_hyps
            GP.kernel = two_plus_three_body

        return bond_means, bond_vars

    def _GenGrid_inner(self, params):
        '''
        generate grid for each angle, used to parallelize grid generation

        Builds a synthetic two-bond environment for every (r1, r2) pair at
        the given angle and records the GP mean and L^{-1}k* vector.
        '''
        a12, angle12, bond_lengths, GP, env12 = params
        nop = self.grid_num[0]
        noa = self.grid_num[2]
        bond_means = np.zeros([nop, nop])
        bond_vars = np.zeros([nop, nop, len(GP.alpha)])

        for b1, r1 in enumerate(bond_lengths):
            for b2, r2 in enumerate(bond_lengths):
                # Place the second bond at angle12 from the first (x axis)
                # and compute the cross-bond distance geometrically.
                x2 = r2 * np.cos(angle12)
                y2 = r2 * np.sin(angle12)
                r12 = np.linalg.norm(np.array([x2-r1, y2, 0]))

                env12.bond_array_3 = np.array([[r1, 1, 0, 0], [r2, 0, 0, 0]])
                env12.cross_bond_dists = np.array([[0, r12], [r12, 0]])
                k12_v = GP.get_kernel_vector(env12, 1)
                v12_vec = solve_triangular(GP.l_mat, k12_v, lower=True)
                mean_diff = np.matmul(k12_v, GP.alpha)
                bond_means[b1, b2] = mean_diff
                bond_vars[b1, b2, :] = v12_vec

        return bond_means, bond_vars

    def build_map(self, y_mean, y_var, svd_rank, load_svd):
        '''
        build 3-d spline function for mean,
        3-d for the low rank approximation of L^{-1}k*
        '''
        nop = self.grid_num[0]
        noa = self.grid_num[2]
        self.mean = SplinesInterpolation(y_mean, u_bounds=self.u_bound,
                                         l_bounds=self.l_bound,
                                         orders=np.array([nop, nop, noa]))
        self.var = PCASplines(y_var, u_bounds=self.u_bound, l_bounds=self.l_bound,
                              orders=np.array([nop, nop, noa]), svd_rank=svd_rank,
                              load_svd=load_svd)

    def build_selfkern(self, grid_kern):
        # NOTE(review): TruncatedSVD is not imported anywhere in this
        # module (it lives in sklearn.decomposition); this method raises
        # NameError if called and appears to be unused.  Confirm and
        # either import or delete.
        self.selfkern = TruncatedSVD(n_components=100, n_iter=7, random_state=42)
        self.selfkern.fit(grid_kern)

    def predict(self, atom_env, GP, mean_only):
        '''
        predict for an atom environment
        param: atom_env: ChemicalEnvironment
        return force on an atom with its variance

        In '3' mode returns (force, variance); in '2+3' mode returns
        (force, self kernel, projected variance vector) for combination
        by the caller.  With mean_only the variance parts are zeros.
        '''
        t0 = time.time()
        bond_array = atom_env.bond_array_3
        cross_bond_inds = atom_env.cross_bond_inds
        cross_bond_dists = atom_env.cross_bond_dists
        triplets = atom_env.triplet_counts

        # Each triplet is used in both (r1, r2, a) and (r2, r1, a) order
        # so forces can be projected onto both bond directions.
        tri_12, tri_21, xyz_1s, xyz_2s = get_triplets(bond_array,
            cross_bond_inds, cross_bond_dists, triplets)
        tri_12 = np.array(tri_12)
        tri_21 = np.array(tri_21)
        xyz_1s = np.array(xyz_1s)
        xyz_2s = np.array(xyz_2s)
        #print('\nget triplets', time.time()-t0)

        # predict mean
        t0 = time.time()
        f0_12 = self.mean(tri_12)
        f0_21 = self.mean(tri_21)
        f12 = np.diag(f0_12) @ xyz_1s
        f21 = np.diag(f0_21) @ xyz_2s
        mff_f = np.sum(f12 + f21, axis=0)
        #print('mean', time.time()-t0)

        # predict var
        mff_v = np.zeros(3)
        if not mean_only:
            t0 = time.time()
            self_kern = np.zeros(3)
            if self.bodies == '3':
                sig, ls, noise = GP.hyps
            elif self.bodies == '2+3':
                sig2, ls2, sig, ls, noise = GP.hyps
            r_cut = GP.cutoffs[1]
            for d in range(3):
                self_kern[d] = self_three_body_jit(bond_array,
                    cross_bond_inds,
                    cross_bond_dists,
                    triplets,
                    d+1, sig, ls, r_cut, quadratic_cutoff)
            # print('self kern', time.time()-t0, ',value:', self_kern)

            t0 = time.time()
            v0_12 = self.var(tri_12)
            v0_21 = self.var(tri_21)
            v12 = v0_12 @ xyz_1s
            v21 = v0_21 @ xyz_2s
            v = v12 + v21
            if self.bodies == '3':
                # Predictive variance: self kernel minus explained part.
                mff_v = - np.sum(v ** 2, axis=0) + self_kern
                return mff_f, mff_v
            elif self.bodies == '2+3':
                v = self.var.V @ v
                return mff_f, self_kern, v
            # print('var', time.time()-t0, ',value:', mff_v)
        else:
            if self.bodies == '3':
                return mff_f, mff_v
            elif self.bodies == '2+3':
                return mff_f, 0, 0
@njit
def get_triplets(bond_array, cross_bond_inds,
                 cross_bond_dists, triplets):
    # Enumerate every bond triplet (r1, r2, angle12) in the environment.
    # Returns each triplet in both bond orderings, plus the unit direction
    # vectors of the two bonds, for force projection by the caller.
    num = np.sum(triplets)
    tris1 = np.zeros((num,3))
    tris2 = np.zeros((num,3))
    tri_dir1 = np.zeros((num,3))
    tri_dir2 = np.zeros((num,3))

    k = 0
    for m in range(bond_array.shape[0]):
        r1 = bond_array[m, 0]
        c1 = bond_array[m, 1:]

        for n in range(triplets[m]):
            # index of the partner bond paired with bond m
            ind1 = cross_bond_inds[m, m+n+1]
            r2 = bond_array[ind1, 0]
            c2 = bond_array[ind1, 1:]
            # angle between the two unit bond vectors
            a12 = np.arccos(np.sum(c1*c2))

            tris1[k] = np.array((r1, r2, a12))
            tris2[k] = np.array((r2, r1, a12))
            tri_dir1[k] = c1
            tri_dir2[k] = c2

            k += 1

    return tris1, tris2, tri_dir1, tri_dir2
@njit
def quadratic_cutoff(r_cut, ri, ci):
    # Quadratic smoothing envelope: value (r_cut - ri)^2 and its
    # derivative projected onto the coordinate direction ci.
    gap = r_cut - ri
    value = gap * gap
    derivative = 2 * gap * ci
    return value, derivative
@njit
def self_two_body_jit(bond_array, d, sig, ls,
                      r_cut, cutoff_func):
    # Self-kernel of the 2-body force kernel along coordinate d: sums
    # force_helper over all bond pairs, exploiting pair symmetry by
    # iterating n >= m and double-counting off-diagonal terms.
    kern = 0

    # pre-computed length-scale constants shared by force_helper
    ls1 = 1 / (2 * ls * ls)
    ls2 = 1 / (ls * ls)
    ls3 = ls2 * ls2
    sig2 = sig*sig

    for m in range(bond_array.shape[0]):
        ri = bond_array[m, 0]
        ci = bond_array[m, d]
        fi, fdi = cutoff_func(r_cut, ri, ci)

        for n in range(m, bond_array.shape[0]):
            rj = bond_array[n, 0]
            cj = bond_array[n, d]
            fj, fdj = cutoff_func(r_cut, rj, cj)

            r11 = ri - rj
            A = ci * cj
            B = r11 * ci
            C = r11 * cj
            D = r11 * r11
            if m == n:
                kern += force_helper(A, B, C, D, fi, fj, fdi, fdj, ls1, ls2,
                                     ls3, sig2)
            else:
                # off-diagonal pairs appear twice in the full double sum
                kern += 2*force_helper(A, B, C, D, fi, fj, fdi, fdj, ls1, ls2,
                                       ls3, sig2)

    return kern
@njit
def self_three_body_jit(bond_array, cross_bond_inds,
                        cross_bond_dists, triplets,
                        d, sig, ls, r_cut, cutoff_func):
    # Self-kernel of the 3-body force kernel along coordinate d: sums
    # triplet_kernel over all pairs of triplets in the environment,
    # exploiting symmetry by iterating p >= m and double-counting p != m.
    kern = 0

    # pre-compute constants that appear in the inner loop
    sig2 = sig*sig
    ls1 = 1 / (2*ls*ls)
    ls2 = 1 / (ls*ls)
    ls3 = ls2*ls2

    for m in range(bond_array.shape[0]):
        ri1 = bond_array[m, 0]
        ci1 = bond_array[m, d]
        fi1, fdi1 = cutoff_func(r_cut, ri1, ci1)

        for n in range(triplets[m]):
            # second bond of triplet i and the cross-bond distance
            ind1 = cross_bond_inds[m, m+n+1]
            ri2 = bond_array[ind1, 0]
            ci2 = bond_array[ind1, d]
            fi2, fdi2 = cutoff_func(r_cut, ri2, ci2)

            ri3 = cross_bond_dists[m, m+n+1]
            fi3, _ = cutoff_func(r_cut, ri3, 0)

            # combined cutoff value and derivative for triplet i
            fi = fi1*fi2*fi3
            fdi = fdi1*fi2*fi3+fi1*fdi2*fi3

            for p in range(m, bond_array.shape[0]):
                rj1 = bond_array[p, 0]
                cj1 = bond_array[p, d]
                fj1, fdj1 = cutoff_func(r_cut, rj1, cj1)

                for q in range(triplets[p]):
                    ind2 = cross_bond_inds[p, p+1+q]
                    rj2 = bond_array[ind2, 0]
                    cj2 = bond_array[ind2, d]
                    fj2, fdj2 = cutoff_func(r_cut, rj2, cj2)

                    rj3 = cross_bond_dists[p, p+1+q]
                    fj3, _ = cutoff_func(r_cut, rj3, 0)

                    # combined cutoff value and derivative for triplet j
                    fj = fj1*fj2*fj3
                    fdj = fdj1*fj2*fj3+fj1*fdj2*fj3

                    tri_kern = triplet_kernel(ci1, ci2, cj1, cj2, ri1, ri2, ri3,
                                              rj1, rj2, rj3, fi, fj, fdi, fdj,
                                              ls1, ls2, ls3, sig2)
                    if p == m:
                        kern += tri_kern
                    else:
                        # pairs with p != m appear twice in the full sum
                        kern += 2 * tri_kern

    return kern
@njit
def triplet_kernel(ci1, ci2, cj1, cj2, ri1, ri2, ri3, rj1, rj2, rj3, fi, fj,
                   fdi, fdj, ls1, ls2, ls3, sig2):
    # Kernel between two bond triplets: pairwise differences of the
    # bond/cross-bond lengths feed the six bond-matching permutations.
    r11 = ri1-rj1
    r12 = ri1-rj2
    r13 = ri1-rj3
    r21 = ri2-rj1
    r22 = ri2-rj2
    r23 = ri2-rj3
    r31 = ri3-rj1
    r32 = ri3-rj2
    r33 = ri3-rj3

    # sum over all six permutations
    M1 = three_body_helper_1(ci1, ci2, cj1, cj2, r11, r22, r33, fi, fj, fdi,
                             fdj, ls1, ls2, ls3, sig2)
    M2 = three_body_helper_2(ci2, ci1, cj2, cj1, r21, r13, r32, fi, fj, fdi,
                             fdj, ls1, ls2, ls3, sig2)
    M3 = three_body_helper_2(ci1, ci2, cj1, cj2, r12, r23, r31, fi, fj, fdi,
                             fdj, ls1, ls2, ls3, sig2)
    M4 = three_body_helper_1(ci1, ci2, cj2, cj1, r12, r21, r33, fi, fj, fdi,
                             fdj, ls1, ls2, ls3, sig2)
    M5 = three_body_helper_2(ci2, ci1, cj1, cj2, r22, r13, r31, fi, fj, fdi,
                             fdj, ls1, ls2, ls3, sig2)
    M6 = three_body_helper_2(ci1, ci2, cj2, cj1, r11, r23, r32, fi, fj, fdi,
                             fdj, ls1, ls2, ls3, sig2)

    return M1 + M2 + M3 + M4 + M5 + M6
@njit
def three_body_helper_1(ci1, ci2, cj1, cj2, r11, r22, r33,
                        fi, fj, fdi, fdj,
                        ls1, ls2, ls3, sig2):
    # Permutation term where the bonds pair up directly: (i1,j1), (i2,j2).
    dot_cc = ci1 * cj1 + ci2 * cj2
    proj_i = r11 * ci1 + r22 * ci2
    proj_j = r11 * cj1 + r22 * cj2
    dist_sq = r11 * r11 + r22 * r22 + r33 * r33
    return force_helper(dot_cc, proj_i, proj_j, dist_sq,
                        fi, fj, fdi, fdj, ls1, ls2, ls3, sig2)
@njit
def three_body_helper_2(ci1, ci2, cj1, cj2, r12, r23, r31,
                        fi, fj, fdi, fdj,
                        ls1, ls2, ls3, sig2):
    # Permutation term where the bonds pair up crosswise.
    dot_cc = ci1 * cj2
    proj_i = r12 * ci1 + r23 * ci2
    proj_j = r12 * cj2 + r31 * cj1
    dist_sq = r12 * r12 + r23 * r23 + r31 * r31
    return force_helper(dot_cc, proj_i, proj_j, dist_sq,
                        fi, fj, fdi, fdj, ls1, ls2, ls3, sig2)
@njit
def force_helper(A, B, C, D, fi, fj, fdi, fdj, ls1, ls2, ls3, sig2):
    # Force-force covariance element of a squared-exponential kernel with
    # smooth cutoffs: combines the cutoff values (fi, fj), their
    # derivatives (fdi, fdj), and the precomputed length-scale constants.
    gauss = exp(-D * ls1)
    f_term = B * fi
    g_term = -C * fj
    inner = A * fi * fj + f_term * g_term * ls2
    combined = fdi * fdj + (f_term * fdj + g_term * fdi + inner) * ls2
    return sig2 * combined * gauss
| 35.372781 | 100 | 0.528563 |
55551e92cf2b39a4c98938dd34f9e25c5f6d5703 | 7,523 | py | Python | yt_dlp/postprocessor/embedthumbnail.py | king-millez/yt-dlp | ff2751ac9cc7d4150797d3207da9b566396bc796 | [
"Unlicense"
] | null | null | null | yt_dlp/postprocessor/embedthumbnail.py | king-millez/yt-dlp | ff2751ac9cc7d4150797d3207da9b566396bc796 | [
"Unlicense"
] | null | null | null | yt_dlp/postprocessor/embedthumbnail.py | king-millez/yt-dlp | ff2751ac9cc7d4150797d3207da9b566396bc796 | [
"Unlicense"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals
import os
import subprocess
import struct
import re
import base64
try:
import mutagen
has_mutagen = True
except ImportError:
has_mutagen = False
from .ffmpeg import (
FFmpegPostProcessor,
FFmpegThumbnailsConvertorPP,
)
from ..utils import (
check_executable,
encodeArgument,
encodeFilename,
error_to_compat_str,
PostProcessingError,
prepend_extension,
process_communicate_or_kill,
shell_quote,
)
class EmbedThumbnailPPError(PostProcessingError):
    # Raised when embedding a thumbnail into the media file fails.
    pass
class EmbedThumbnailPP(FFmpegPostProcessor):
    def __init__(self, downloader=None, already_have_thumbnail=False):
        # NOTE(review): already_have_thumbnail presumably indicates the user
        # also requested the thumbnail file itself (e.g. --write-thumbnail),
        # so it should be kept on disk after embedding -- confirm in run().
        FFmpegPostProcessor.__init__(self, downloader)
        self._already_have_thumbnail = already_have_thumbnail
def run(self, info):
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
if not info.get('thumbnails'):
self.to_screen('There aren\'t any thumbnails to embed')
return [], info
thumbnail_filename = info['thumbnails'][-1]['filepath']
if not os.path.exists(encodeFilename(thumbnail_filename)):
self.report_warning('Skipping embedding the thumbnail because the file is missing.')
return [], info
# Correct extension for WebP file with wrong extension (see #25687, #25717)
convertor = FFmpegThumbnailsConvertorPP(self._downloader)
convertor.fixup_webp(info, -1)
original_thumbnail = thumbnail_filename = info['thumbnails'][-1]['filepath']
# Convert unsupported thumbnail formats to JPEG (see #25687, #25717)
thumbnail_ext = os.path.splitext(thumbnail_filename)[1][1:]
if thumbnail_ext not in ('jpg', 'png'):
thumbnail_filename = convertor.convert_thumbnail(thumbnail_filename, 'jpg')
thumbnail_ext = 'jpg'
mtime = os.stat(encodeFilename(filename)).st_mtime
success = True
if info['ext'] == 'mp3':
options = [
'-c', 'copy', '-map', '0:0', '-map', '1:0', '-id3v2_version', '3',
'-metadata:s:v', 'title="Album cover"', '-metadata:s:v', 'comment="Cover (front)"']
self.to_screen('Adding thumbnail to "%s"' % filename)
self.run_ffmpeg_multiple_files([filename, thumbnail_filename], temp_filename, options)
elif info['ext'] in ['mkv', 'mka']:
options = ['-c', 'copy', '-map', '0', '-dn']
mimetype = 'image/%s' % ('png' if thumbnail_ext == 'png' else 'jpeg')
old_stream, new_stream = self.get_stream_number(
filename, ('tags', 'mimetype'), mimetype)
if old_stream is not None:
options.extend(['-map', '-0:%d' % old_stream])
new_stream -= 1
options.extend([
'-attach', thumbnail_filename,
'-metadata:s:%d' % new_stream, 'mimetype=%s' % mimetype,
'-metadata:s:%d' % new_stream, 'filename=cover.%s' % thumbnail_ext])
self.to_screen('Adding thumbnail to "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
elif info['ext'] in ['m4a', 'mp4', 'mov']:
try:
options = ['-c', 'copy', '-map', '0', '-dn', '-map', '1']
old_stream, new_stream = self.get_stream_number(
filename, ('disposition', 'attached_pic'), 1)
if old_stream is not None:
options.extend(['-map', '-0:%d' % old_stream])
new_stream -= 1
options.extend(['-disposition:%s' % new_stream, 'attached_pic'])
self.to_screen('Adding thumbnail to "%s"' % filename)
self.run_ffmpeg_multiple_files([filename, thumbnail_filename], temp_filename, options)
except PostProcessingError as err:
self.report_warning('unable to embed using ffprobe & ffmpeg; %s' % error_to_compat_str(err))
atomicparsley = next((
x for x in ['AtomicParsley', 'atomicparsley']
if check_executable(x, ['-v'])), None)
if atomicparsley is None:
raise EmbedThumbnailPPError('AtomicParsley was not found. Please install')
cmd = [encodeFilename(atomicparsley, True),
encodeFilename(filename, True),
encodeArgument('--artwork'),
encodeFilename(thumbnail_filename, True),
encodeArgument('-o'),
encodeFilename(temp_filename, True)]
cmd += [encodeArgument(o) for o in self._configuration_args('AtomicParsley')]
self.to_screen('Adding thumbnail to "%s"' % filename)
self.write_debug('AtomicParsley command line: %s' % shell_quote(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process_communicate_or_kill(p)
if p.returncode != 0:
msg = stderr.decode('utf-8', 'replace').strip()
raise EmbedThumbnailPPError(msg)
# for formats that don't support thumbnails (like 3gp) AtomicParsley
# won't create to the temporary file
if b'No changes' in stdout:
self.report_warning('The file format doesn\'t support embedding a thumbnail')
success = False
elif info['ext'] in ['ogg', 'opus']:
if not has_mutagen:
raise EmbedThumbnailPPError('module mutagen was not found. Please install using `python -m pip install mutagen`')
self.to_screen('Adding thumbnail to "%s"' % filename)
size_regex = r',\s*(?P<w>\d+)x(?P<h>\d+)\s*[,\[]'
size_result = self.run_ffmpeg(thumbnail_filename, thumbnail_filename, ['-hide_banner'])
mobj = re.search(size_regex, size_result)
width, height = int(mobj.group('w')), int(mobj.group('h'))
mimetype = ('image/%s' % ('png' if thumbnail_ext == 'png' else 'jpeg')).encode('ascii')
# https://xiph.org/flac/format.html#metadata_block_picture
data = bytearray()
data += struct.pack('>II', 3, len(mimetype))
data += mimetype
data += struct.pack('>IIIIII', 0, width, height, 8, 0, os.stat(thumbnail_filename).st_size) # 32 if png else 24
fin = open(thumbnail_filename, "rb")
data += fin.read()
fin.close()
temp_filename = filename
f = mutagen.File(temp_filename)
f.tags['METADATA_BLOCK_PICTURE'] = base64.b64encode(data).decode('ascii')
f.save()
else:
raise EmbedThumbnailPPError('Supported filetypes for thumbnail embedding are: mp3, mkv/mka, ogg/opus, m4a/mp4/mov')
if success and temp_filename != filename:
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
self.try_utime(filename, mtime, mtime)
files_to_delete = [thumbnail_filename]
if self._already_have_thumbnail:
if original_thumbnail == thumbnail_filename:
files_to_delete = []
elif original_thumbnail != thumbnail_filename:
files_to_delete.append(original_thumbnail)
return files_to_delete, info
| 41.794444 | 129 | 0.593247 |
f5918e06368082359ac176def1088732a434040a | 1,219 | py | Python | run.py | morabrandoi/DeepQ2048 | 0c9db4e175037f42852f65c025e591ddb3d9d84f | [
"MIT"
] | null | null | null | run.py | morabrandoi/DeepQ2048 | 0c9db4e175037f42852f65c025e591ddb3d9d84f | [
"MIT"
] | null | null | null | run.py | morabrandoi/DeepQ2048 | 0c9db4e175037f42852f65c025e591ddb3d9d84f | [
"MIT"
] | null | null | null | from agent import Agent
from puzzle import GameGrid
import sys
import numpy as np
# normalize input values
episodes = 61234
if len(sys.argv) == 2:
MODE = sys.argv[1]
else:
MODE = 'train'
environment = GameGrid()
bot = Agent(MODE, episodes)
# five tup is (state, action, state_after, reward, terminal)
for episode in range(episodes):
if MODE != "play":
if episode % 75 == 0 and episode != 0:
bot.target_model.set_weights(bot.model.get_weights())
still_playing = True
state_before_action = environment.give_recent_state()
step = 0
while still_playing:
action = bot.decide_move(state_before_action)
state_after_action, reward, done = environment.take_action(event=None, action=action)
bot.remember(np.array((state_before_action, action, state_after_action, reward, done)))
if done is True:
still_playing = False
if MODE != "play":
bot.train_model()
print(f"Score: {environment.final_score_prev}; Ep: {episode}; Rand: {round(bot.epsilon, 4)} ")
state_before_action = state_after_action
bot.episode_num += 1
bot.update_epsilon()
print(environment.max_score, "MAX SCORE")
| 25.93617 | 106 | 0.670221 |
9aadd5d2e5e4ab0b2ea6cec5240e02911d9a3fab | 4,687 | py | Python | code/api/utils.py | CiscoSecurity/tr-05-docker-relay | 8cf9cced02eb338d06d80419b35e563156ac6c9f | [
"MIT"
] | null | null | null | code/api/utils.py | CiscoSecurity/tr-05-docker-relay | 8cf9cced02eb338d06d80419b35e563156ac6c9f | [
"MIT"
] | null | null | null | code/api/utils.py | CiscoSecurity/tr-05-docker-relay | 8cf9cced02eb338d06d80419b35e563156ac6c9f | [
"MIT"
] | 1 | 2021-03-12T14:06:46.000Z | 2021-03-12T14:06:46.000Z | import json
from json.decoder import JSONDecodeError
import jwt
import requests
from flask import request, jsonify
from jwt import InvalidSignatureError, DecodeError, InvalidAudienceError
from requests.exceptions import ConnectionError, InvalidURL, HTTPError
from api.errors import AuthorizationError, InvalidArgumentError
NO_AUTH_HEADER = 'Authorization header is missing'
WRONG_AUTH_TYPE = 'Wrong authorization type'
WRONG_PAYLOAD_STRUCTURE = 'Wrong JWT payload structure'
WRONG_JWT_STRUCTURE = 'Wrong JWT structure'
WRONG_AUDIENCE = 'Wrong configuration-token-audience'
KID_NOT_FOUND = 'kid from JWT header not found in API response'
WRONG_KEY = ('Failed to decode JWT with provided key. '
'Make sure domain in custom_jwks_host '
'corresponds to your SecureX instance region.')
JWKS_HOST_MISSING = ('jwks_host is missing in JWT payload. Make sure '
'custom_jwks_host field is present in module_type')
WRONG_JWKS_HOST = ('Wrong jwks_host in JWT payload. Make sure domain follows '
'the visibility.<region>.cisco.com structure')
def get_public_key(jwks_host, token):
"""
Get public key by requesting it from specified jwks host.
NOTE. This function is just an example of how one can read and check
anything before passing to an API endpoint, and thus it may be modified in
any way, replaced by another function, or even removed from the module.
"""
expected_errors = (
ConnectionError,
InvalidURL,
KeyError,
JSONDecodeError,
HTTPError
)
try:
response = requests.get(f"https://{jwks_host}/.well-known/jwks")
response.raise_for_status()
jwks = response.json()
public_keys = {}
for jwk in jwks['keys']:
kid = jwk['kid']
public_keys[kid] = jwt.algorithms.RSAAlgorithm.from_jwk(
json.dumps(jwk)
)
kid = jwt.get_unverified_header(token)['kid']
return public_keys.get(kid)
except expected_errors:
raise AuthorizationError(WRONG_JWKS_HOST)
def get_auth_token():
    """Parse and validate the incoming request's Authorization header.

    Returns the bearer token string.
    Raises AuthorizationError(NO_AUTH_HEADER) when the header is absent and
    AuthorizationError(WRONG_AUTH_TYPE) when the scheme is not ``Bearer``.

    NOTE. This function is just an example of how one can read and check
    anything before passing to an API endpoint, and thus it may be modified in
    any way, replaced by another function, or even removed from the module.
    """
    try:
        scheme, token = request.headers['Authorization'].split()
    except KeyError:
        raise AuthorizationError(NO_AUTH_HEADER)
    # FIX: the scheme check used `assert`, which `python -O` strips,
    # silently disabling this rejection. Use an explicit check instead.
    if scheme.lower() != 'bearer':
        raise AuthorizationError(WRONG_AUTH_TYPE)
    return token
def get_jwt():
    """
    Get Authorization token and validate its signature
    against the public key from /.well-known/jwks endpoint.

    NOTE. This function is just an example of how one can read and check
    anything before passing to an API endpoint, and thus it may be modified in
    any way, replaced by another function, or even removed from the module.
    """
    # Maps each anticipated failure class to the message reported back.
    expected_errors = {
        KeyError: WRONG_PAYLOAD_STRUCTURE,
        AssertionError: JWKS_HOST_MISSING,
        InvalidSignatureError: WRONG_KEY,
        DecodeError: WRONG_JWT_STRUCTURE,
        InvalidAudienceError: WRONG_AUDIENCE,
        TypeError: KID_NOT_FOUND
    }
    token = get_auth_token()
    try:
        # First decode WITHOUT verification, only to read the jwks_host claim.
        jwks_payload = jwt.decode(token, options={'verify_signature': False})
        # NOTE(review): `assert` is stripped under `python -O`, so the
        # JWKS_HOST_MISSING branch disappears in optimized runs -- confirm.
        assert 'jwks_host' in jwks_payload
        jwks_host = jwks_payload.get('jwks_host')
        # get_public_key may return None for an unknown kid; jwt.decode then
        # raises TypeError, which is mapped to KID_NOT_FOUND above.
        key = get_public_key(jwks_host, token)
        aud = request.url_root
        payload = jwt.decode(
            token, key=key, algorithms=['RS256'], audience=[aud.rstrip('/')]
        )
        return payload['key']
    except tuple(expected_errors) as error:
        message = expected_errors[error.__class__]
        raise AuthorizationError(message)
def get_json(schema):
    """Parse the request body as JSON and validate it against *schema*.

    Returns the parsed payload; raises InvalidArgumentError with the
    schema's validation message when validation fails.

    NOTE. This function is just an example of how one can read and check
    anything before passing to an API endpoint, and thus it may be modified in
    any way, replaced by another function, or even removed from the module.
    """
    payload = request.get_json(force=True, silent=True, cache=False)
    errors = schema.validate(payload)
    if errors:
        raise InvalidArgumentError(errors)
    return payload
def jsonify_data(data):
    """Wrap *data* in the standard success envelope: ``{"data": ...}``."""
    return jsonify({'data': data})
def jsonify_errors(data):
    """Wrap a single error in the standard envelope: ``{"errors": [...]}``."""
    return jsonify({'errors': [data]})
| 33.241135 | 78 | 0.688287 |
680af4acb651d0ffd25c447a7b5e0bc9bd87fd2a | 4,861 | py | Python | google/appengine/ext/webapp/mail_handlers.py | MiCHiLU/google_appengine_sdk | 3da9f20d7e65e26c4938d2c4054bc4f39cbc5522 | [
"Apache-2.0"
] | 16 | 2016-04-23T20:16:12.000Z | 2021-10-09T16:58:25.000Z | google/appengine/ext/webapp/mail_handlers.py | MiCHiLU/google_appengine_sdk | 3da9f20d7e65e26c4938d2c4054bc4f39cbc5522 | [
"Apache-2.0"
] | 53 | 2016-04-06T21:10:43.000Z | 2018-03-19T23:14:33.000Z | google/appengine/ext/webapp/mail_handlers.py | MiCHiLU/google_appengine_sdk | 3da9f20d7e65e26c4938d2c4054bc4f39cbc5522 | [
"Apache-2.0"
] | 23 | 2016-04-19T05:45:26.000Z | 2021-12-31T23:22:36.000Z | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Handler library for inbound Mail API.
Contains handlers to help with receiving mail and mail bounces.
InboundMailHandler: Has helper method for easily setting up
email receivers.
BounceNotificationHandler: Has helper method for easily setting
up bounce notification receiver. Will parse HTTP request to
extract bounce notification.
"""
from google.appengine.api import mail
from google.appengine.ext import webapp
MAIL_HANDLER_URL_PATTERN = '/_ah/mail/.+'
BOUNCE_NOTIFICATION_HANDLER_URL_PATH = '/_ah/bounce'
class InboundMailHandler(webapp.RequestHandler):
    """Base request handler for App Engine inbound email.

    Subclass and override receive() to process each incoming message, then
    register the handler with the route returned by mapping(), e.g.::

        class HelloReceiver(InboundMailHandler):
            def receive(self, mail_message):
                logging.info('Received greeting from %s: %s'
                             % (mail_message.sender, mail_message.body))

        application = webapp.WSGIApplication([HelloReceiver.mapping()])
    """

    def post(self):
        """Parse the POST body into an InboundEmailMessage and dispatch it."""
        message = mail.InboundEmailMessage(self.request.body)
        self.receive(message)

    def receive(self, mail_message):
        """Subclass hook; called once per received InboundEmailMessage."""
        pass

    @classmethod
    def mapping(cls):
        """Return the (url_pattern, handler_class) pair for WSGI routing."""
        return MAIL_HANDLER_URL_PATTERN, cls
class BounceNotificationHandler(webapp.RequestHandler):
    """Base request handler for App Engine bounce notifications.

    Subclass and override receive() to act on each BounceNotification, then
    register the handler with the route returned by mapping(), e.g.::

        class BounceLogger(BounceNotificationHandler):
            def receive(self, bounce_notification):
                logging.info('Received bounce from '
                             % bounce_notification.notification_from)

        application = webapp.WSGIApplication([BounceLogger.mapping()])
    """

    def post(self):
        """Parse the POST body into a BounceNotification and dispatch it."""
        notification = BounceNotification(self.request.POST)
        self.receive(notification)

    def receive(self, bounce_notification):
        """Subclass hook; called once per received BounceNotification."""
        pass

    @classmethod
    def mapping(cls):
        """Return the (url_path, handler_class) pair for WSGI routing."""
        return BOUNCE_NOTIFICATION_HANDLER_URL_PATH, cls
class BounceNotification(object):
    """Parsed representation of a bounce-notification POST.

    Exposes three read-only views of the request: ``original`` (the message
    that caused the bounce) and ``notification`` (the bounce report itself),
    both dicts keyed by to/cc/bcc/from/subject/text, plus
    ``original_raw_message`` built from the raw bounced message.
    """

    # Field names mirrored in both the original- and notification- POST vars.
    _FIELDS = ('to', 'cc', 'bcc', 'from', 'subject', 'text')

    def __init__(self, post_vars):
        """Build the notification from *post_vars* (e.g. ``request.POST``).

        Missing POST variables default to the empty string.
        """
        self.__original = dict(
            (field, post_vars.get('original-' + field, ''))
            for field in self._FIELDS)
        self.__notification = dict(
            (field, post_vars.get('notification-' + field, ''))
            for field in self._FIELDS)
        self.__original_raw_message = mail.InboundEmailMessage(
            post_vars.get('raw-message', ''))

    @property
    def original(self):
        """Dict describing the message that caused the bounce."""
        return self.__original

    @property
    def notification(self):
        """Dict describing the bounce itself."""
        return self.__notification

    @property
    def original_raw_message(self):
        """InboundEmailMessage built from the raw bounced message."""
        return self.__original_raw_message
| 26.856354 | 78 | 0.689776 |
77dc2796d56bd73cb649b50f372593660b471f7a | 5,431 | py | Python | programs/parsing/syrnt.py | ETCBC/linksyr | 3ba42432b0ed95c1ad65eb06865c3a5f7175f8b6 | [
"MIT"
] | 1 | 2020-10-05T11:45:56.000Z | 2020-10-05T11:45:56.000Z | programs/parsing/syrnt.py | ETCBC/linksyr | 3ba42432b0ed95c1ad65eb06865c3a5f7175f8b6 | [
"MIT"
] | null | null | null | programs/parsing/syrnt.py | ETCBC/linksyr | 3ba42432b0ed95c1ad65eb06865c3a5f7175f8b6 | [
"MIT"
] | 1 | 2021-09-09T11:48:59.000Z | 2021-09-09T11:48:59.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# now it works in python 2.6 and 3.x!
from __future__ import unicode_literals, print_function
import os.path
from collections import namedtuple
from constants import NT_BOOKS, SyrNT as c
# Read database location from config file
try: # allow for different module names in python 2 and 3
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
config = ConfigParser()
config.read('linksyr.conf')
datadir = config.get('syrnt','datadir')
filename = config.get('syrnt','filename')
dbpath = os.path.join(datadir, filename)
NT_OFFSET = 52 # starting id of NT books
# TODO check discrepancies between SEDRA and Syromorph NT.
# It seems that
# 521601715
# 550104214
# 562103016
# 562302606
# 580201122
# 781901718
# 782202101
# 782202102
# 782202103
# 782202104
# 782202105
# 782202106
# 782202107
# 782202108
# helper functions
def read_db_file():
    """Yield the SyrNT database file line by line (one verse per line)."""
    from io import open  # This is for python 2.6
    with open(dbpath) as f:
        for line in f:
            yield line
def get_verse_labels():
    """Yield a (book_name, book_id, chapter, verse) tuple for every verse
    of the NT, in canonical order. Book ids start at NT_OFFSET."""
    for b_id, (b_name, chapter_counts) in enumerate(NT_BOOKS, NT_OFFSET):
        for ch_num, n_verses in enumerate(chapter_counts, 1):
            for v_num in range(1, n_verses + 1):
                yield (b_name, b_id, ch_num, v_num)
def maketrans(s1, s2):
    """Build a character translation table for ``str.translate``.

    Maps the ordinal of each character of *s1* to the ordinal of the
    corresponding character of *s2* (extra characters in the longer
    string are ignored, as with ``zip``). Implemented with plain
    ``dict``/``zip`` so it works on Python 2.6 as well as 3.x.
    """
    return dict((ord(src), ord(dst)) for src, dst in zip(s1, s2))
# translation tables:
# source is always SEDRA transcription, so only need to specify 'to'.
# towit: SEDRA -> ETCBC/WIT-style transliteration (only differing letters listed)
towit = maketrans('AOKY;CEI/XW','>WXVJK<PYQC')
# tosyr: SEDRA -> Syriac script (full consonant inventory)
tosyr = maketrans('ABGDHOZKY;CLMNSEI/XRWT','ܐܒܓܕܗܘܙܚܛܝܟܠܡܢܣܥܦܨܩܪܫܬ')
# notr: empty table, i.e. keep the SEDRA transcription unchanged
notr = maketrans('','')
def postag(w):
    """Tagger key function: return only the part-of-speech tag of word *w*."""
    return w.postag
def supertag(w):
    """Tagger key function: join prefix, POS tag and suffix of word *w*
    with '+', skipping any empty parts."""
    parts = (w.prefix, w.postag, w.suffix)
    return '+'.join(e for e in parts if e)
# class NTWord
class NTWord:
    """One word occurrence in the SyrNT database, with its annotations."""

    # namedtuple types derived from the ANNOTATIONS spec in constants.SyrNT;
    # spaces in field names are normalized to underscores.
    Annotation = namedtuple('Annotation',
                            [f[0].replace(' ', '_') for f in c.ANNOTATIONS])
    Location = namedtuple('Location',
                          ['book_name', 'book_id', 'chapter', 'verse', 'w_num'])

    def __init__(self, w_str, location, tr):
        # w_str: "<consonantal form>|<ann1>#<ann2>#..." as stored in the data
        # file; tr: optional translation table for the transcription, or None.
        if tr is not None:
            w_str = w_str.translate(tr)
        self.cons_str, a_str = w_str.split('|')
        self.location = NTWord.Location(*location)
        # numeric annotation codes become ints, everything else stays a string
        self.annotation = NTWord.Annotation(*[int(v) if v.isdigit() else v
                                              for v in a_str.split('#')])
        # resolve coded values to readable ones where the ANNOTATIONS spec
        # provides a value table (f[1]); pass the raw value through otherwise
        self.ann_values = NTWord.Annotation(*[f[1][v] if f[1] else v
                                              for f, v in zip(c.ANNOTATIONS, self.annotation)])
        # some shortcuts:
        self.stem = self.ann_values.stem
        self.lexeme = self.ann_values.lexeme
        self.root = self.ann_values.root
        self.prefix = self.ann_values.prefix
        self.suffix = self.ann_values.suffix
        self.seyame = self.ann_values.seyame
        self.postag = self.ann_values.grammatical_category

    def __repr__(self):
        return '<NTWord {0}: "{1}">'.format(self.get_loc_str(), self.cons_str)

    def __str__(self):
        return self.cons_str

    def get_loc_str(self):
        '''Combine location elements into fixed-length string'''
        # book(2) chapter(2... actually 3) verse and word number, zero-padded
        return '{0:02}{1:02}{2:03}{3:02}'.format(*self.location[1:])
        # return get_loc_id(self.book_id, self.chapter, self.verse, self.w_num)
class SyrNT:
    """Sequence/iterator over all NTWord objects in the Syriac NT database.

    ``tr`` selects the transcription (default: ``towit``); pass ``None``
    to keep the raw SEDRA transcription.
    """

    def __init__(self, tr=towit):
        # Note: the attribute assignments below shadow the helper methods of
        # the same names with their computed results.
        self._nt_verses = self._nt_verses(tr)
        self._nt_words = self._nt_words()
        self._idx = 0  # cursor for the iterator protocol (shared per instance)

    def __getitem__(self, key):
        return self._nt_words[key]

    def __len__(self):
        return len(self._nt_words)

    def __iter__(self):
        return self

    def __next__(self):
        # NOTE(review): the cursor is reset on exhaustion, so the instance can
        # be iterated again -- but two concurrent iterations share one cursor.
        try:
            item = self._nt_words[self._idx]
        except IndexError:
            self._idx = 0
            raise StopIteration()
        self._idx += 1
        return item

    next = __next__  # Python 2

    def _nt_verses(self, tr):
        # Pair every verse label with the list of NTWord objects of that verse.
        nt_verses = []
        for verse_label, line in zip(get_verse_labels(), read_db_file()):
            verse = [NTWord(w_str, verse_label + (w_num,), tr)
                     for w_num, w_str in enumerate(line.strip().split(), 1)]
            nt_verses.append((verse_label, verse))
        return nt_verses

    def _nt_words(self):
        # Flatten the verse lists into one word list.
        return [w for l, v in self._nt_verses for w in v]

    def verses(self, label=False):
        """Yield each verse (word list); with label=True yield (label, verse)."""
        for v in self._nt_verses:
            yield v if label else v[1]

    def words(self):
        """Yield every NTWord in order."""
        for w in self._nt_words:
            yield w

    def tag_sentences(self, tag=postag):
        """Yield each verse as a list of (consonantal string, tag) pairs."""
        for s in self.verses():
            yield [(w.cons_str, tag(w)) for w in s]

    def printlines(self):
        """Yield display lines: a chapter heading per chapter, then one
        numbered line of joined word strings per verse."""
        pl = None  # pl: previous label
        for l, v in self.verses(label=True):
            # new book (l[1]) or new chapter (l[2]) -> emit a heading
            if pl is None or pl[1] != l[1] or pl[2] != l[2]:
                if pl is not None:  # no newline before first chapter
                    yield ''
                yield '{0} chapter {1}'.format(l[0], l[2])
            pl = l
            yield '{0:2} {1}'.format(l[3], ' '.join([w.cons_str for w in v]))
def main():
    """Print the whole NT in Syriac script to stdout."""
    for line in SyrNT(tosyr).printlines():
        print(line)


def usage():
    """Print the module docstring as help text."""
    print(__doc__)


if __name__ == "__main__":
    main()
| 29.516304 | 79 | 0.617566 |
8984da6beedc707537889074266bf96659e6905c | 2,430 | py | Python | homeassistant/components/cppm_tracker/device_tracker.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/cppm_tracker/device_tracker.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/cppm_tracker/device_tracker.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Support for ClearPass Policy Manager."""
from __future__ import annotations
from datetime import timedelta
import logging
from clearpasspy import ClearPass
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_API_KEY, CONF_CLIENT_ID, CONF_HOST
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
# Poll ClearPass at most once every two minutes.
SCAN_INTERVAL = timedelta(seconds=120)

# OAuth2 grant type used to authenticate against the ClearPass API.
GRANT_TYPE = "client_credentials"

# Platform config: ClearPass host plus API client id and secret are required.
PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_CLIENT_ID): cv.string,
        vol.Required(CONF_API_KEY): cv.string,
    }
)

_LOGGER = logging.getLogger(__name__)
def get_scanner(hass: HomeAssistant, config: ConfigType) -> DeviceScanner | None:
    """Set up the ClearPass scanner; return None when authentication fails."""
    conf = config[DOMAIN]
    cppm = ClearPass(
        {
            "server": conf[CONF_HOST],
            "grant_type": GRANT_TYPE,
            "secret": conf[CONF_API_KEY],
            "client": conf[CONF_CLIENT_ID],
        }
    )
    if cppm.access_token is None:
        return None
    _LOGGER.debug("Successfully received Access Token")
    return CPPMDeviceScanner(cppm)
class CPPMDeviceScanner(DeviceScanner):
    """Device scanner backed by Aruba ClearPass Policy Manager."""

    def __init__(self, cppm):
        """Store the ClearPass client and prepare the results cache."""
        self._cppm = cppm
        self.results = None

    def scan_devices(self):
        """Refresh the cache and return MAC addresses of online devices."""
        self.get_cppm_data()
        return [entry["mac"] for entry in self.results]

    def get_device_name(self, device):
        """Return the cached name for *device* (a MAC address), or None."""
        for entry in self.results:
            if entry["mac"] == device:
                return entry["name"]
        return None

    def get_cppm_data(self):
        """Query ClearPass endpoints and cache those currently online.

        Each cached entry maps both 'mac' and 'name' to the MAC address.
        """
        endpoints = self._cppm.get_endpoints(100)["_embedded"]["items"]
        devices = [
            {"mac": item["mac_address"], "name": item["mac_address"]}
            for item in endpoints
            if self._cppm.online_status(item["mac_address"])
        ]
        _LOGGER.debug("Devices: %s", devices)
        self.results = devices
| 29.277108 | 88 | 0.665432 |
d617f95b24c8a5b467db0c643b287ab37f6a2cfc | 1,307 | py | Python | src/gui/main_form.py | hoangphanthai/Activity_Recognition_Modelling | 6230ab498527460758157f4bfb3a70d5c17135fa | [
"MIT"
] | null | null | null | src/gui/main_form.py | hoangphanthai/Activity_Recognition_Modelling | 6230ab498527460758157f4bfb3a70d5c17135fa | [
"MIT"
] | null | null | null | src/gui/main_form.py | hoangphanthai/Activity_Recognition_Modelling | 6230ab498527460758157f4bfb3a70d5c17135fa | [
"MIT"
] | 1 | 2022-03-20T19:50:58.000Z | 2022-03-20T19:50:58.000Z |
import tkinter as tk
from tkinter import ttk
from sys import platform
from globals import log_message
from .tab_data_import import TabDataImport
from .tab_training import TabTraining
class Mainform:
    """Top-level Tk window holding the Import and Training notebook tabs."""

    def __init__(self):
        self.window = tk.Tk()
        self.note_book = ttk.Notebook(self.window)
        self.tabImport = TabDataImport(self.note_book)
        self.tabTraining = TabTraining(self.note_book)
        # Give the import tab a handle on the training tab so imported data
        # can be passed through.
        self.tabImport.training_tab = self.tabTraining
        self.note_book.add(self.tabImport, text = 'Import Datasets')
        self.note_book.add(self.tabTraining, text = 'Training Models')
        self.note_book.pack(expand = 1, fill = 'both')
        # The training tab (index 1) stays disabled until data is imported.
        self.note_book.tab(1, state = 'disabled')

    def start(self):
        """Size/position the window per OS and enter the Tk main loop (blocks)."""
        self.window.title('Activity Recognition Modelling')
        # Checking the OS platform
        if platform == "darwin": # Mac OS
            self.window.geometry('1230x700')
            self.window.geometry('+{}+{}'.format(30, 30))
        else: # Windows or others
            self.window.geometry('935x700')
            self.window.geometry('+{}+{}'.format(210, 30))
        self.window.resizable(0, 0)
        # self.window.deiconify()
        self.window.mainloop()
        # Reached only after the window has been closed.
        log_message('Thank you for your patience!')
8dfb6999464838804a3a7b388e9bce3413b3d522 | 74 | py | Python | events/__init__.py | jlowe77/Eris-Cogs | 2ade8f82db3477527af3cff3b48ebb281e1a6987 | [
"Apache-2.0"
] | 6 | 2020-05-13T20:43:53.000Z | 2021-06-23T16:10:13.000Z | events/__init__.py | jlowe77/Eris-Cogs | 2ade8f82db3477527af3cff3b48ebb281e1a6987 | [
"Apache-2.0"
] | 12 | 2019-04-02T13:29:10.000Z | 2020-03-27T18:07:16.000Z | events/__init__.py | jlowe77/Eris-Cogs | 2ade8f82db3477527af3cff3b48ebb281e1a6987 | [
"Apache-2.0"
] | 9 | 2020-06-07T21:46:54.000Z | 2022-03-01T22:49:02.000Z | from .events import Events
def setup(bot):
    """Red-DiscordBot cog entry point: register the Events cog on *bot*."""
    bot.add_cog(Events(bot))
| 12.333333 | 28 | 0.702703 |
c480eb7b2dce781b377d428e61e271c792ea5087 | 1,022 | py | Python | monasca_common/kafka/legacy_kafka_message.py | zhangjm12/monasca-common | 2ebc766534eba6163e98b94a1f114ece18739fff | [
"Apache-2.0"
] | 26 | 2015-10-18T02:54:54.000Z | 2022-02-15T01:36:41.000Z | monasca_common/kafka/legacy_kafka_message.py | zhangjm12/monasca-common | 2ebc766534eba6163e98b94a1f114ece18739fff | [
"Apache-2.0"
] | 18 | 2019-11-01T13:03:36.000Z | 2022-02-16T02:28:52.000Z | monasca_common/kafka/legacy_kafka_message.py | zhangjm12/monasca-common | 2ebc766534eba6163e98b94a1f114ece18739fff | [
"Apache-2.0"
] | 22 | 2016-06-01T11:47:17.000Z | 2020-02-11T14:41:45.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class LegacyKafkaMessage(object):
    """Adapter exposing a kafka-python legacy ``(partition,
    OffsetAndMessage)`` tuple through modern-style accessor methods."""

    def __init__(self, raw_message):
        """Unpack *raw_message*, a (partition, offset-and-message) pair."""
        partition, wrapped = raw_message
        self.m_partition = partition
        self.m_offset = wrapped.offset
        self.m_key = wrapped.message.key
        self.m_value = wrapped.message.value

    def key(self):
        """Return the message key."""
        return self.m_key

    def offset(self):
        """Return the message offset within its partition."""
        return self.m_offset

    def partition(self):
        """Return the partition number."""
        return self.m_partition

    def value(self):
        """Return the message payload."""
        return self.m_value
| 30.969697 | 76 | 0.700587 |
41fb1696bc371837e4ab3a82bb897aa3eb63fd3d | 981 | py | Python | projects/vdk-control-cli/src/vdk/internal/control/command_groups/version_group/version.py | alod83/versatile-data-kit | 9ca672d3929eb3dc6fe5c677e8c8a75e2a0d2be8 | [
"Apache-2.0"
] | 100 | 2021-10-04T09:32:04.000Z | 2022-03-30T11:23:53.000Z | projects/vdk-control-cli/src/vdk/internal/control/command_groups/version_group/version.py | alod83/versatile-data-kit | 9ca672d3929eb3dc6fe5c677e8c8a75e2a0d2be8 | [
"Apache-2.0"
] | 208 | 2021-10-04T16:56:40.000Z | 2022-03-31T10:41:44.000Z | projects/vdk-control-cli/src/vdk/internal/control/command_groups/version_group/version.py | alod83/versatile-data-kit | 9ca672d3929eb3dc6fe5c677e8c8a75e2a0d2be8 | [
"Apache-2.0"
] | 14 | 2021-10-11T14:15:13.000Z | 2022-03-11T13:39:17.000Z | # Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import click
from pkg_resources import DistributionNotFound
from pkg_resources import get_distribution
# https://packaging.python.org/guides/single-sourcing-package-version/#
try:
# Change here if project is renamed and does not equal the setuptools metadata.name
dist_name = "vdk-control-cli"
__version__ = get_distribution(dist_name).version
except DistributionNotFound: # pragma: no cover
__version__ = "unknown"
def build_details():
    """Return build metadata as a "key=value, ..." string.

    Reads the public attributes of the generated vdk_control_build_info
    module; returns the empty string when that module is unavailable or
    malformed (e.g. in a source checkout without build info).
    """
    try:
        from vdk.internal.control import vdk_control_build_info

        build = [
            f"{key}={value}"
            for key, value in vdk_control_build_info.__dict__.items()
            if not key.startswith("_")
        ]
        return ", ".join(build)
    # FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; keep the best-effort fallback but narrow it.
    except Exception:
        return ""
@click.command(help="Prints the version of the client")
def version():
    """CLI command: print the client version and build details to stdout."""
    click.echo(f"""Version: {__version__}\nBuild details: {build_details()}""")
| 28.852941 | 87 | 0.691131 |
81057d4595628b9d5c87e76cae13708c0e6074ed | 1,178 | py | Python | tempfile/setup.py | badgeteam/micropython-lib | fca0235c166ebbada489d88c42fc549267832797 | [
"PSF-2.0"
] | null | null | null | tempfile/setup.py | badgeteam/micropython-lib | fca0235c166ebbada489d88c42fc549267832797 | [
"PSF-2.0"
] | null | null | null | tempfile/setup.py | badgeteam/micropython-lib | fca0235c166ebbada489d88c42fc549267832797 | [
"PSF-2.0"
] | 2 | 2017-11-21T16:53:03.000Z | 2021-07-29T08:47:14.000Z | import sys
# Remove current dir from sys.path, otherwise setuptools will peek up our
# module instead of system's.
sys.path.pop(0)
from setuptools import setup
sys.path.append("..")
import optimize_upip
setup(name='micropython-tempfile',
version='0.0.2',
description='Dummy tempfile module for MicroPython',
long_description='This is a dummy implementation of a module for MicroPython standard library.\nIt contains zero or very little functionality, and primarily intended to\navoid import errors (using idea that even if an application imports a\nmodule, it may be not using it onevery code path, so may work at least\npartially). It is expected that more complete implementation of the module\nwill be provided later. Please help with the development if you are\ninterested in this module.',
url='https://github.com/micropython/micropython-lib',
author='MicroPython Developers',
author_email='micro-python@googlegroups.com',
maintainer='MicroPython Developers',
maintainer_email='micro-python@googlegroups.com',
license='MIT',
cmdclass={'optimize_upip': optimize_upip.OptimizeUpip},
py_modules=['tempfile'])
| 56.095238 | 492 | 0.756367 |
3c9f2cc274f5ab2224364f4415ffa96e2f7a0127 | 239 | py | Python | awair/utils.py | ybbarng/awair | 0b18e4159fc473cd57eff9584cb71200fed4e162 | [
"MIT"
] | 1 | 2019-05-22T12:38:42.000Z | 2019-05-22T12:38:42.000Z | awair/utils.py | ybbarng/awair | 0b18e4159fc473cd57eff9584cb71200fed4e162 | [
"MIT"
] | null | null | null | awair/utils.py | ybbarng/awair | 0b18e4159fc473cd57eff9584cb71200fed4e162 | [
"MIT"
] | null | null | null | from datetime import datetime
import pytz
# strptime/strftime pattern for Awair's ISO-8601 timestamps
# ('...Z' suffix, microsecond precision).
ISO_8601_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'


def str_to_datetime(string):
    """Parse an ISO-8601 'Z'-suffixed timestamp into a naive datetime."""
    parsed = datetime.strptime(string, ISO_8601_FORMAT)
    return parsed


def datetime_to_str(dt):
    """Format *dt* back into the same ISO-8601 'Z' representation."""
    return dt.strftime(ISO_8601_FORMAT)
| 15.933333 | 53 | 0.74477 |
4e34fd2ad0e9511ad04ec763491e5f8e366c2c81 | 1,320 | py | Python | src/panoramic/cli/diff.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 5 | 2020-11-13T17:26:59.000Z | 2021-03-19T15:11:26.000Z | src/panoramic/cli/diff.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 5 | 2020-10-28T10:22:35.000Z | 2021-01-27T17:33:58.000Z | src/panoramic/cli/diff.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 3 | 2021-01-26T07:58:03.000Z | 2021-03-11T13:28:34.000Z | import difflib
from panoramic.cli.file_utils import dump_yaml
from panoramic.cli.print import Color, echo_info, echo_style
from panoramic.cli.state import ActionList
_LINE_START_TO_COLOR = {
'+': Color.GREEN,
'-': Color.RED,
'@': Color.BLUE,
}
def echo_diff(actions: ActionList):
for action in actions.actions:
if action.is_deletion:
echo_style(action.description, fg=Color.RED)
elif action.is_creation:
echo_style(action.description, fg=Color.GREEN)
else:
# Assumes update
echo_style(action.description, fg=Color.YELLOW)
current_yaml = dump_yaml(action.current.to_dict()) if action.current is not None else ''
desired_yaml = dump_yaml(action.desired.to_dict()) if action.desired is not None else ''
assert current_yaml is not None and desired_yaml is not None
current_yaml_lines = current_yaml.splitlines(keepends=True)
desired_yaml_lines = desired_yaml.splitlines(keepends=True)
diff = difflib.unified_diff(current_yaml_lines, desired_yaml_lines, fromfile='current', tofile='desired')
for line in diff:
color = _LINE_START_TO_COLOR.get(line[0])
echo_style(line, fg=color, nl=False)
echo_info('')
| 33.846154 | 117 | 0.665909 |
16a8dd21c94776bbe49d20cc5f62107eb3fb39bf | 1,992 | py | Python | superset/views/sql_lab.py | mbeacom/superset | 8dfe2b70b2ee85c8cfe79a7a37eefaa790158bf1 | [
"Apache-2.0"
] | 1 | 2018-02-12T18:15:08.000Z | 2018-02-12T18:15:08.000Z | superset/views/sql_lab.py | mbeacom/superset | 8dfe2b70b2ee85c8cfe79a7a37eefaa790158bf1 | [
"Apache-2.0"
] | null | null | null | superset/views/sql_lab.py | mbeacom/superset | 8dfe2b70b2ee85c8cfe79a7a37eefaa790158bf1 | [
"Apache-2.0"
] | 4 | 2017-04-28T07:52:00.000Z | 2017-05-03T12:34:41.000Z | from flask import redirect, g
from flask_appbuilder import expose
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import gettext as __
from superset import appbuilder
from superset.models.sql_lab import Query, SavedQuery
from .base import SupersetModelView, BaseSupersetView, DeleteMixin
class QueryView(SupersetModelView):
datamodel = SQLAInterface(Query)
list_columns = ['user', 'database', 'status', 'start_time', 'end_time']
appbuilder.add_view(
QueryView,
"Queries",
label=__("Queries"),
category="Manage",
category_label=__("Manage"),
icon="fa-search")
class SavedQueryView(SupersetModelView, DeleteMixin):
datamodel = SQLAInterface(SavedQuery)
list_columns = [
'label', 'user', 'database', 'schema', 'description',
'modified', 'pop_tab_link']
show_columns = [
'id', 'label', 'user', 'database',
'description', 'sql', 'pop_tab_link']
search_columns = ('label', 'user', 'database', 'schema', 'changed_on')
add_columns = ['label', 'database', 'description', 'sql']
edit_columns = add_columns
base_order = ('changed_on', 'desc')
def pre_add(self, obj):
obj.user = g.user
def pre_update(self, obj):
self.pre_add(obj)
class SavedQueryViewApi(SavedQueryView):
show_columns = ['label', 'db_id', 'schema', 'description', 'sql']
add_columns = show_columns
edit_columns = add_columns
appbuilder.add_view_no_menu(SavedQueryViewApi)
appbuilder.add_view_no_menu(SavedQueryView)
appbuilder.add_link(
__('Saved Queries'),
href='/sqllab/my_queries/',
icon="fa-save",
category='SQL Lab')
class SqlLab(BaseSupersetView):
"""The base views for Superset!"""
@expose("/my_queries/")
def my_queries(self):
"""Assigns a list of found users to the given role."""
return redirect(
'/savedqueryview/list/?_flt_0_user={}'.format(g.user.id))
appbuilder.add_view_no_menu(SqlLab)
| 28.056338 | 75 | 0.687249 |
a281bbbe9468cfd0fc7f64068a0699911cc5e4cd | 112,437 | py | Python | server/.vim/bundle/YouCompleteMe/third_party/ycmd/ycmd/completers/language_server/language_server_completer.py | hkdb/sysconf | 99d334f7309657647059c4b37f25e33dffc81fc3 | [
"MIT"
] | 10 | 2020-07-21T21:59:54.000Z | 2021-07-19T11:01:47.000Z | server/.vim/bundle/YouCompleteMe/third_party/ycmd/ycmd/completers/language_server/language_server_completer.py | hkdb/sysconf | 99d334f7309657647059c4b37f25e33dffc81fc3 | [
"MIT"
] | null | null | null | server/.vim/bundle/YouCompleteMe/third_party/ycmd/ycmd/completers/language_server/language_server_completer.py | hkdb/sysconf | 99d334f7309657647059c4b37f25e33dffc81fc3 | [
"MIT"
] | 1 | 2021-01-30T18:17:01.000Z | 2021-01-30T18:17:01.000Z | # Copyright (C) 2017-2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from functools import partial
import abc
import collections
import contextlib
import json
import logging
import os
import queue
import subprocess
import threading
from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer
from ycmd import extra_conf_store, responses, utils
from ycmd.completers.completer import Completer, CompletionsCache
from ycmd.completers.completer_utils import GetFileContents, GetFileLines
from ycmd.utils import LOGGER
from ycmd.completers.language_server import language_server_protocol as lsp
# Message used when a textDocument/hover response carries no content.
NO_HOVER_INFORMATION = 'No hover information.'

# All timeout values are in seconds
REQUEST_TIMEOUT_COMPLETION = 5
REQUEST_TIMEOUT_INITIALISE = 30
REQUEST_TIMEOUT_COMMAND    = 30
CONNECTION_TIMEOUT         = 5

# Size of the notification ring buffer
MAX_QUEUED_MESSAGES = 250
'codeActionProvider': (
lambda self, request_data, args: self.GetCodeActions( request_data, args )
),
'declarationProvider': (
lambda self, request_data, args: self.GoTo( request_data,
[ 'Declaration' ] )
),
'definitionProvider': (
lambda self, request_data, args: self.GoTo( request_data, [ 'Definition' ] )
),
( 'definitionProvider', 'declarationProvider' ): (
lambda self, request_data, args: self.GoTo( request_data,
[ 'Definition',
'Declaration' ] )
),
'documentFormattingProvider': (
lambda self, request_data, args: self.Format( request_data )
),
'executeCommandProvider': (
lambda self, request_data, args: self.ExecuteCommand( request_data,
args )
),
'implementationProvider': (
lambda self, request_data, args: self.GoTo( request_data,
[ 'Implementation' ] )
),
'referencesProvider': (
lambda self, request_data, args: self.GoTo( request_data,
[ 'References' ] )
),
'renameProvider': (
lambda self, request_data, args: self.RefactorRename( request_data, args )
),
'typeDefinitionProvider': (
lambda self, request_data, args: self.GoTo( request_data,
[ 'TypeDefinition' ] )
),
}
# Each command is mapped to a list of providers. This allows a command to use
# another provider if the LSP server doesn't support the main one. For instance,
# GoToDeclaration is mapped to the same provider as GoToDefinition if there is
# no declaration provider. A tuple of providers is also allowed for commands
# like GoTo where it's convenient to jump to the declaration if already on the
# definition and vice versa.
DEFAULT_SUBCOMMANDS_MAP = {
  'ExecuteCommand':     [ 'executeCommandProvider' ],
  'FixIt':              [ 'codeActionProvider' ],
  'GoToDefinition':     [ 'definitionProvider' ],
  'GoToDeclaration':    [ 'declarationProvider', 'definitionProvider' ],
  'GoTo':               [ ( 'definitionProvider', 'declarationProvider' ),
                          'definitionProvider' ],
  'GoToType':           [ 'typeDefinitionProvider' ],
  'GoToImplementation': [ 'implementationProvider' ],
  'GoToReferences':     [ 'referencesProvider' ],
  'RefactorRename':     [ 'renameProvider' ],
  'Format':             [ 'documentFormattingProvider' ],
}
class NoHoverInfoException( Exception ):
  """Raised instead of RuntimeError for empty hover responses, to allow
  completers to easily distinguish empty hover from other errors."""
  pass # pragma: no cover
class ResponseTimeoutException( Exception ):
  """Raised by LanguageServerConnection if a request exceeds the supplied
  time-to-live."""
  pass # pragma: no cover
class ResponseAbortedException( Exception ):
  """Raised by LanguageServerConnection if a request is canceled due to the
  server shutting down."""
  pass # pragma: no cover
class ResponseFailedException( Exception ):
  """Raised by LanguageServerConnection if a request returns an error"""
  pass # pragma: no cover
class IncompatibleCompletionException( Exception ):
  """Internal exception returned when a completion item is encountered which is
  not supported by ycmd, or where the completion item is invalid."""
  pass # pragma: no cover
class LanguageServerConnectionTimeout( Exception ):
  """Raised by LanguageServerConnection if the connection to the server is not
  established with the specified timeout."""
  pass # pragma: no cover
class LanguageServerConnectionStopped( Exception ):
  """Internal exception raised by LanguageServerConnection when the server is
  successfully shut down according to user request."""
  pass # pragma: no cover
class Response:
  """Represents a blocking pending request.

  LanguageServerCompleter creates one instance of this class per request that
  expects a response, then waits for that response synchronously via
  |AwaitResponse|.

  The LanguageServerConnection message pump thread calls |ResponseReceived|
  when the associated response is read, which wakes up |AwaitResponse| to
  process the actual response."""

  def __init__( self, response_callback=None ):
    """|response_callback|, if supplied, is a callable taking
    ( response, message ) that is invoked in the message pump thread context
    when the response arrives. Note that |response| is _this object_, not the
    calling object, and |message| is the raw message received. NOTE: This
    should not normally be used; callers should instead wait synchronously
    with AwaitResponse."""
    self._response_callback = response_callback
    self._message = None
    self._event = threading.Event()

  def ResponseReceived( self, message ):
    """Called by the message pump thread when the response with corresponding
    ID is received from the server. Stores the message, wakes any waiter, and
    fires the optional message-pump-thread callback."""
    self._message = message
    self._event.set()
    callback = self._response_callback
    if callback:
      callback( self, message )

  def Abort( self ):
    """Called when the server is shutting down; unblocks any waiter with an
    empty (None) message."""
    self.ResponseReceived( None )

  def AwaitResponse( self, timeout ):
    """Block until the response is received or |timeout| seconds have passed.

    Returns the message, or:
      - throws ResponseFailedException if the request fails
      - throws ResponseTimeoutException in case of timeout
      - throws ResponseAbortedException in case the server is shut down."""
    # Event.wait returns False iff the timeout expired before set().
    if not self._event.wait( timeout ):
      raise ResponseTimeoutException( 'Response Timeout' )

    message = self._message
    if message is None:
      # A None message is the sentinel written by Abort().
      raise ResponseAbortedException( 'Response Aborted' )

    if 'error' in message:
      error = message[ 'error' ]
      raise ResponseFailedException( 'Request failed: {0}: {1}'.format(
        error.get( 'code' ) or 0,
        error.get( 'message' ) or 'No message' ) )

    return message
class LanguageServerConnection( threading.Thread ):
  """
  Abstract language server communication object.

  This connection runs as a thread and is generally only used directly by
  LanguageServerCompleter, but is instantiated, started and stopped by
  concrete LanguageServerCompleter implementations.

  Implementations of this class are required to provide the following methods:
    - TryServerConnectionBlocking: Connect to the server and return when the
                                   connection is established
    - Shutdown: Close any sockets or channels prior to the thread exit
    - WriteData: Write some data to the server
    - ReadData: Read some data from the server, blocking until some data is
                available

  Threads:

  LSP is by its nature an asynchronous protocol. There are request-reply like
  requests and unsolicited notifications. Receipt of the latter is mandatory,
  so we cannot rely on there being a bottle thread executing a client request.

  So we need a message pump and dispatch thread. This is actually the
  LanguageServerConnection, which implements Thread. It's main method simply
  listens on the socket/stream and dispatches complete messages to the
  LanguageServerCompleter. It does this:

  - For requests: Using python event objects, wrapped in the Response class
  - For notifications: via a synchronized Queue.

  NOTE: Some handling is done in the dispatch thread. There are certain
  notifications which we have to handle when we get them, such as:

  - Initialization messages
  - Diagnostics

  In these cases, we allow some code to be executed inline within the dispatch
  thread, as there is no other thread guaranteed to execute. These are handled
  by callback functions and mutexes.

  Using this class in concrete LanguageServerCompleter implementations:

  Startup

  - Call Start() and AwaitServerConnection()
  - AwaitServerConnection() throws LanguageServerConnectionTimeout if the
    server fails to connect in a reasonable time.

  Shutdown

  - Call Stop() prior to shutting down the downstream server (see
    LanguageServerCompleter.ShutdownServer to do that part)
  - Call Close() to close any remaining streams. Do this in a request thread.
    DO NOT CALL THIS FROM THE DISPATCH THREAD. That is, Close() must not be
    called from a callback supplied to GetResponseAsync, or in any callback or
    method with a name like "*InPollThread". The result would be a deadlock.

  Footnote: Why does this interface exist?

  Language servers are at liberty to provide their communication interface
  over any transport. Typically, this is either stdio or a socket (though some
  servers require multiple sockets). This interface abstracts the
  implementation detail of the communication from the transport, allowing
  concrete completers to choose the right transport according to the
  downstream server (i.e. Whatever works best).

  If in doubt, use the StandardIOLanguageServerConnection as that is the
  simplest. Socket-based connections often require the server to connect back
  to us, which can lead to complexity and possibly blocking.
  """
  @abc.abstractmethod
  def TryServerConnectionBlocking( self ):
    pass # pragma: no cover


  def _CancelWatchdogThreads( self ):
    # Stop and join every file-watching observer thread started in response
    # to a workspace/didChangeWatchedFiles registration.
    for observer in self._observers:
      observer.stop()
      observer.join()


  def Shutdown( self ):
    self._CancelWatchdogThreads()


  @abc.abstractmethod
  def WriteData( self, data ):
    pass # pragma: no cover


  @abc.abstractmethod
  def ReadData( self, size=-1 ):
    pass # pragma: no cover


  def __init__( self,
                project_directory,
                watchdog_factory,
                notification_handler = None ):
    """|watchdog_factory| builds the watchdog event handler used to service
    file-watch registrations from the server. |notification_handler|, if
    supplied, is called in the message pump thread for every notification."""
    super().__init__()

    self._watchdog_factory = watchdog_factory
    self._project_directory = project_directory
    self._last_id = 0
    self._responses = {}
    self._response_mutex = threading.Lock()
    self._notifications = queue.Queue( maxsize=MAX_QUEUED_MESSAGES )

    self._connection_event = threading.Event()
    self._stop_event = threading.Event()
    self._notification_handler = notification_handler

    # The currently installed collector for workspace/applyEdit requests from
    # the server; by default such requests are rejected.
    self._collector = RejectCollector()
    self._observers = []


  @contextlib.contextmanager
  def CollectApplyEdits( self, collector ):
    """Context manager which temporarily installs |collector| to receive
    workspace/applyEdit requests from the server, restoring the previous
    collector on exit."""
    old_collector = self._collector
    self._collector = collector
    try:
      yield
    finally:
      self._collector = old_collector


  def run( self ):
    """Thread main: establish the connection, then pump messages until the
    connection is stopped or dies."""
    try:
      # Wait for the connection to fully establish (this runs in the thread
      # context, so we block until a connection is received or there is a
      # timeout, which throws an exception)
      self.TryServerConnectionBlocking()
      self._connection_event.set()

      # Blocking loop which reads whole messages and calls _DispatchMessage
      self._ReadMessages()
    except LanguageServerConnectionStopped:
      # Abort any outstanding requests
      with self._response_mutex:
        for _, response in self._responses.items():
          response.Abort()
        self._responses.clear()

      LOGGER.debug( 'Connection was closed cleanly' )
    except Exception:
      LOGGER.exception( 'The language server communication channel closed '
                        'unexpectedly. Issue a RestartServer command to '
                        'recover.' )

      # Abort any outstanding requests
      with self._response_mutex:
        for _, response in self._responses.items():
          response.Abort()
        self._responses.clear()

      # Close any remaining sockets or files
      self.Shutdown()


  def Start( self ):
    # Wraps the fact that this class inherits (privately, in a sense) from
    # Thread.
    self.start()


  def Stop( self ):
    # Signals the message pump to exit; the actual teardown happens in run().
    self._stop_event.set()


  def Close( self ):
    """Close the connection and join the message pump thread. Must not be
    called from the dispatch thread itself (deadlock)."""
    self.Shutdown()
    try:
      self.join()
    except RuntimeError:
      LOGGER.exception( "Shutting down dispatch thread while it isn't active" )
      # This actually isn't a problem in practice.


  def IsStopped( self ):
    # True once Stop() has been requested.
    return self._stop_event.is_set()


  def NextRequestId( self ):
    """Return a new unique request ID (monotonically increasing integer)."""
    with self._response_mutex:
      self._last_id += 1
      return self._last_id


  def GetResponseAsync( self, request_id, message, response_callback=None ):
    """Issue a request to the server and return immediately. If a response needs
    to be handled, supply a method taking ( response, message ) in
    response_callback. Note |response| is the instance of Response and message
    is the message received from the server.
    Returns the Response instance created."""
    response = Response( response_callback )

    with self._response_mutex:
      assert request_id not in self._responses
      self._responses[ request_id ] = response

    LOGGER.debug( 'TX: Sending message: %r', message )

    self.WriteData( message )
    return response


  def GetResponse( self, request_id, message, timeout ):
    """Issue a request to the server and await the response. See
    Response.AwaitResponse for return values and exceptions."""
    response = self.GetResponseAsync( request_id, message )
    return response.AwaitResponse( timeout )


  def SendNotification( self, message ):
    """Issue a notification to the server. A notification is "fire and forget";
    no response will be received and nothing is returned."""
    LOGGER.debug( 'TX: Sending notification: %r', message )

    self.WriteData( message )


  def SendResponse( self, message ):
    """Send a response message. This is a message which is not a notification,
    but still requires no further response from the server."""
    LOGGER.debug( 'TX: Sending response: %r', message )

    self.WriteData( message )


  def AwaitServerConnection( self ):
    """Language server completer implementations should call this after starting
    the server and the message pump (Start()) to await successful connection to
    the server being established.

    Returns no meaningful value, but may throw LanguageServerConnectionTimeout
    in the event that the server does not connect promptly. In that case,
    clients should shut down their server and reset their state."""
    self._connection_event.wait( timeout = CONNECTION_TIMEOUT )

    if not self._connection_event.is_set():
      raise LanguageServerConnectionTimeout(
        'Timed out waiting for server to connect' )


  def _ReadMessages( self ):
    """Main message pump. Within the message pump thread context, reads messages
    from the socket/stream by calling self.ReadData in a loop and dispatch
    complete messages by calling self._DispatchMessage.

    When the server is shut down cleanly, raises
    LanguageServerConnectionStopped"""
    data = bytes( b'' )
    while True:
      data, read_bytes, headers = self._ReadHeaders( data )

      if 'Content-Length' not in headers:
        # FIXME: We could try and recover this, but actually the message pump
        # just fails.
        raise ValueError( "Missing 'Content-Length' header" )

      content_length = int( headers[ 'Content-Length' ] )

      # We need to read content_length bytes for the payload of this message.
      # This may be in the remainder of `data`, but equally we may need to read
      # more data from the socket.
      content = bytes( b'' )
      content_read = 0
      if read_bytes < len( data ):
        # There are bytes left in data, use them
        data = data[ read_bytes: ]

        # Read up to content_length bytes from data
        content_to_read = min( content_length, len( data ) )
        content += data[ : content_to_read ]
        content_read += len( content )
        read_bytes = content_to_read

      while content_read < content_length:
        # There is more content to read, but data is exhausted - read more from
        # the socket
        data = self.ReadData( content_length - content_read )
        content_to_read = min( content_length - content_read, len( data ) )
        content += data[ : content_to_read ]
        content_read += len( content )
        read_bytes = content_to_read

      LOGGER.debug( 'RX: Received message: %r', content )

      # lsp will convert content to Unicode
      self._DispatchMessage( lsp.Parse( content ) )

      # We only consumed len( content ) of data. If there is more, we start
      # again with the remainder and look for headers
      data = data[ read_bytes : ]


  def _ReadHeaders( self, data ):
    """Starting with the data in |data| read headers from the stream/socket
    until a full set of headers has been consumed. Returns a tuple (
      - data: any remaining unused data from |data| or the socket
      - read_bytes: the number of bytes of returned data that have been consumed
      - headers: a dictionary whose keys are the header names and whose values
                 are the header values
    )"""
    # LSP defines only 2 headers, of which only 1 is useful (Content-Length).
    # Headers end with an empty line, and there is no guarantee that a single
    # socket or stream read will contain only a single message, or even a whole
    # message.
    headers_complete = False
    prefix = bytes( b'' )
    headers = {}

    while not headers_complete:
      read_bytes = 0
      last_line = 0
      if len( data ) == 0:
        data = self.ReadData()

      while read_bytes < len( data ):
        if utils.ToUnicode( data[ read_bytes: ] )[ 0 ] == '\n':
          line = prefix + data[ last_line : read_bytes ].strip()
          prefix = bytes( b'' )
          last_line = read_bytes

          if not line.strip():
            # An empty line terminates the header block.
            headers_complete = True
            read_bytes += 1
            break
          else:
            try:
              key, value = utils.ToUnicode( line ).split( ':', 1 )
              headers[ key.strip() ] = value.strip()
            except Exception:
              LOGGER.exception( 'Received invalid protocol data from server: '
                                + str( line ) )
              raise

        read_bytes += 1

      if not headers_complete:
        # A header line was split across reads; carry the partial line over.
        prefix = data[ last_line : ]
        data = bytes( b'' )

    return data, read_bytes, headers


  def _ServerToClientRequest( self, request ):
    """Handle a server->client request (a message with both id and method),
    which requires us to send a response. Runs in the message pump thread."""
    method = request[ 'method' ]

    if method == 'workspace/applyEdit':
      self._collector.CollectApplyEdit( request, self )
    elif method == 'client/registerCapability':
      for reg in request[ 'params' ][ 'registrations' ]:
        if reg[ 'method' ] == 'workspace/didChangeWatchedFiles':
          globs = []
          for watcher in reg[ 'registerOptions' ][ 'watchers' ]:
            # TODO: Take care of watcher kinds. Not everything needs
            # to be watched for create, modify *and* delete actions.
            pattern = os.path.join( self._project_directory,
                                    watcher[ 'globPattern' ] )
            if os.path.isdir( pattern ):
              pattern = os.path.join( pattern, '**' )
            globs.append( pattern )
          observer = Observer()
          observer.schedule( self._watchdog_factory( globs ),
                             self._project_directory,
                             recursive = True )
          observer.start()
          self._observers.append( observer )
      self.SendResponse( lsp.Void( request ) )
    elif method == 'client/unregisterCapability':
      for reg in request[ 'params' ][ 'unregisterations' ]:
        if reg[ 'method' ] == 'workspace/didChangeWatchedFiles':
          self._CancelWatchdogThreads()
      self.SendResponse( lsp.Void( request ) )
    else:
      # Reject the request
      self.SendResponse( lsp.Reject( request, lsp.Errors.MethodNotFound ) )


  def _DispatchMessage( self, message ):
    """Called in the message pump thread context when a complete message was
    read. For responses, calls the Response object's ResponseReceived method, or
    for notifications (unsolicited messages from the server), simply accumulates
    them in a Queue which is polled by the long-polling mechanism in
    LanguageServerCompleter."""
    if 'id' in message:
      if 'method' in message:
        # This is a server->client request, which requires a response.
        self._ServerToClientRequest( message )
      else:
        # This is a response to the message with id message[ 'id' ]
        with self._response_mutex:
          message_id = message[ 'id' ]
          assert message_id in self._responses
          self._responses[ message_id ].ResponseReceived( message )
          del self._responses[ message_id ]
    else:
      # This is a notification
      self._AddNotificationToQueue( message )

      # If there is an immediate (in-message-pump-thread) handler configured,
      # call it.
      if self._notification_handler:
        try:
          self._notification_handler( self, message )
        except Exception:
          LOGGER.exception( 'Handling message in poll thread failed: %s',
                            message )


  def _AddNotificationToQueue( self, message ):
    """Append |message| to the bounded notification queue, discarding the
    oldest queued entry when the ring buffer is full."""
    while True:
      try:
        self._notifications.put_nowait( message )
        return
      except queue.Full:
        pass

      # The queue (ring buffer) is full. This indicates either a slow
      # consumer or the message poll is not running. In any case, rather than
      # infinitely queueing, discard the oldest message and try again.
      try:
        self._notifications.get_nowait()
      except queue.Empty:
        # This is only a theoretical possibility to prevent this thread
        # blocking in the unlikely event that all elements are removed from
        # the queue between put_nowait and get_nowait. Unfortunately, this
        # isn't testable without a debugger, so coverage will show up red.
        pass # pragma: no cover
class StandardIOLanguageServerConnection( LanguageServerConnection ):
  """Concrete language server connection using stdin/stdout to communicate with
  the server. This should be the default choice for concrete completers."""

  def __init__( self,
                project_directory,
                watchdog_factory,
                server_stdin,
                server_stdout,
                notification_handler = None ):
    super().__init__( project_directory,
                      watchdog_factory,
                      notification_handler )

    self._server_stdin = server_stdin
    self._server_stdout = server_stdout

    # NOTE: All access to the stdin/out objects must be synchronised due to the
    # long-running `read` operations that are done on stdout, and how our
    # shutdown request will come from another (arbitrary) thread. It is not
    # legal in Python to close a stdio file while there is a pending read. This
    # can lead to IOErrors due to "concurrent operations' on files.
    # See https://stackoverflow.com/q/29890603/2327209
    self._stdin_lock = threading.Lock()
    self._stdout_lock = threading.Lock()


  def TryServerConnectionBlocking( self ):
    # standard in/out don't need to wait for the server to connect to us
    return True


  def Shutdown( self ):
    super().Shutdown()
    # Close both streams under their locks; see the NOTE in __init__.
    with self._stdin_lock:
      if not self._server_stdin.closed:
        self._server_stdin.close()

    with self._stdout_lock:
      if not self._server_stdout.closed:
        self._server_stdout.close()


  def WriteData( self, data ):
    # Write and flush so the server sees the message immediately.
    with self._stdin_lock:
      self._server_stdin.write( data )
      self._server_stdin.flush()


  def ReadData( self, size=-1 ):
    """Read |size| bytes (or one line when size is -1) from the server's
    stdout. Raises LanguageServerConnectionStopped on clean shutdown and
    RuntimeError if the server died unexpectedly."""
    data = None
    with self._stdout_lock:
      if not self._server_stdout.closed:
        if size > -1:
          data = self._server_stdout.read( size )
        else:
          data = self._server_stdout.readline()

    if not data:
      # No data means the connection was severed. Connection severed when (not
      # self.IsStopped()) means the server died unexpectedly.
      if self.IsStopped():
        raise LanguageServerConnectionStopped()

      raise RuntimeError( "Connection to server died" )

    return data
class LanguageServerCompleter( Completer ):
"""
Abstract completer implementation for Language Server Protocol. Concrete
implementations are required to:
- Handle downstream server state and create a LanguageServerConnection,
returning it in GetConnection
- Set its notification handler to self.GetDefaultNotificationHandler()
- See below for Startup/Shutdown instructions
- Optionally handle server-specific command responses in
HandleServerCommandResponse
- Optionally override GetCustomSubcommands to return subcommand handlers
that cannot be detected from the capabilities response.
- Optionally override AdditionalLogFiles for logs other than stderr
- Optionally override ExtraDebugItems for anything that should be in the
/debug_info response, that isn't covered by default
- Optionally override GetServerEnvironment if the server needs to be run
with specific environment variables.
- Implement the following Completer abstract methods:
- GetServerName
- GetCommandLine
- SupportedFiletypes
- DebugInfo
- Shutdown
- ServerIsHealthy : Return True if the server is _running_
- StartServer : Return True if the server was started.
- Optionally override methods to customise behavior:
- ConvertNotificationToMessage
- GetCompleterName
- GetProjectDirectory
- GetProjectRootFiles
- GetTriggerCharacters
- GetDefaultNotificationHandler
- HandleNotificationInPollThread
- Language
Startup
- Startup is initiated for you in OnFileReadyToParse
- The StartServer method is only called once (reset with ServerReset)
- See also LanguageServerConnection requirements
Shutdown
- Call ShutdownServer and wait for the downstream server to exit
- Call ServerReset to clear down state
- See also LanguageServerConnection requirements
Completions
- The implementation should not require any code to support completions
- (optional) Override GetCodepointForCompletionRequest if you wish to change
the completion position (e.g. if you want to pass the "query" to the
server)
Diagnostics
- The implementation should not require any code to support diagnostics
Sub-commands
- The sub-commands map is bespoke to the implementation, but generally, this
class attempts to provide all of the pieces where it can generically.
- By default, the subcommands are detected from the server's capabilities.
The logic for this is in DEFAULT_SUBCOMMANDS_MAP (and implemented by
_DiscoverSubcommandSupport).
- By default FixIt should work, but for example, jdt.ls doesn't implement
CodeActions correctly and forces clients to handle it differently.
For these cases, completers can override any of:
- CodeActionLiteralToFixIt
- CodeActionCommandToFixIt
- CommandToFixIt
- Other commands not covered by DEFAULT_SUBCOMMANDS_MAP are bespoke to the
completer and should be returned by GetCustomSubcommands:
- GetType/GetDoc are bespoke to the downstream server, though this class
provides GetHoverResponse which is useful in this context.
GetCustomSubcommands needs not contain GetType/GetDoc if the member
functions implementing GetType/GetDoc are named GetType/GetDoc.
"""
  def GetConnection( self ):
    """Method that can be implemented by derived classes to return an instance
    of LanguageServerConnection appropriate for the language server in
    question"""
    return self._connection
  def HandleServerCommandResponse( self,
                                   request_data,
                                   edits,
                                   command_response ):
    # Default implementation does nothing; concrete completers may override to
    # post-process responses to workspace/executeCommand requests.
    pass # pragma: no cover
  def __init__( self, user_options ):
    super().__init__( user_options )

    # _server_info_mutex synchronises access to the state of the
    # LanguageServerCompleter object. There are a number of threads at play
    # here which might want to change properties of this object:
    #   - Each client request (handled by concrete completers) executes in a
    #     separate thread and might call methods requiring us to synchronise the
    #     server's view of file state with our own. We protect from clobbering
    #     by doing all server-file-state operations under this mutex.
    #   - There are certain events that we handle in the message pump thread.
    #     These include diagnostics and some parts of initialization. We must
    #     protect against concurrent access to our internal state (such as the
    #     server file state, and stored data about the server itself) when we
    #     are calling methods on this object from the message pump). We
    #     synchronise on this mutex for that.
    #   - We need to make sure that multiple client requests dont try to start
    #     or stop the server simultaneously, so we also do all server
    #     start/stop/etc. operations under this mutex
    self._server_info_mutex = threading.Lock()
    self.ServerReset()

    # LSP allows servers to return an incomplete list of completions. The cache
    # cannot be used in that case and the current column must be sent to the
    # language server for the subsequent completion requests; otherwise, the
    # server will return the same incomplete list. When that list is complete,
    # two cases are considered:
    #  - the starting column was sent to the server: cache is valid for the
    #    whole completion;
    #  - the current column was sent to the server: cache stays valid while the
    #    cached query is a prefix of the subsequent queries.
    self._completions_cache = LanguageServerCompletionsCache()

    # e.g. FooCompleter -> 'Foo'; the language defaults to its lowercase form.
    self._completer_name = self.__class__.__name__.replace( 'Completer', '' )
    self._language = self._completer_name.lower()

    # Handlers run on every OnFileReadyToParse; by default keep the server's
    # view of file contents in sync with ours.
    self._on_file_ready_to_parse_handlers = []
    self.RegisterOnFileReadyToParse(
      lambda self, request_data:
        self._UpdateServerWithFileContents( request_data )
    )

    self._signature_help_disabled = user_options[ 'disable_signature_help' ]

    self._server_keep_logfiles = user_options[ 'server_keep_logfiles' ]
    self._stderr_file = None

    self._Reset()
def _Reset( self ):
self.ServerReset()
self._connection = None
self._server_handle = None
if not self._server_keep_logfiles and self._stderr_file:
utils.RemoveIfExists( self._stderr_file )
self._stderr_file = None
  def ServerReset( self ):
    """Clean up internal state related to the running server instance.
    Implementations are required to call this after disconnection and killing
    the downstream server."""
    # Per-file state that the server has been told about (didOpen/didChange).
    self._server_file_state = lsp.ServerFileStateStore()
    # Latest diagnostics published by the server, keyed by document URI.
    self._latest_diagnostics = collections.defaultdict( list )
    self._sync_type = 'Full'
    # Response/event pair tracking the initialize exchange; the event is set
    # once the exchange completes (or is abandoned during shutdown).
    self._initialize_response = None
    self._initialize_event = threading.Event()
    self._on_initialize_complete_handlers = []
    # Capabilities from the server's initialize response; None until then.
    self._server_capabilities = None
    self._is_completion_provider = False
    self._resolve_completion_items = False
    self._project_directory = None
    # Combined settings (the 'ls' key plus any generic keys); see
    # _GetSettingsFromExtraConf.
    self._settings = {}
    self._extra_conf_dir = None
    # Guards against repeatedly attempting to start a failing server.
    self._server_started = False
def GetCompleterName( self ):
return self._completer_name
def Language( self ):
"""Returns the string used to identify the language in user's
.ycm_extra_conf.py file. Default to the completer name in lower case."""
return self._language
  def StartServer( self, request_data ):
    """Start the language server process, holding the server-state mutex.

    Returns True if the server started and connected successfully. If the
    connection times out, the server is shut down and False is returned."""
    try:
      with self._server_info_mutex:
        return self._StartServerNoLock( request_data )
    except LanguageServerConnectionTimeout:
      LOGGER.error( '%s failed to start, or did not connect successfully',
                    self.GetServerName() )
      # Shutdown acquires the mutex itself, so it must be called outside the
      # 'with' block above.
      self.Shutdown()
      return False
  def _StartServerNoLock( self, request_data ):
    """Launch the server process and connect to it over stdio. Must be called
    with self._server_info_mutex held.

    Raises LanguageServerConnectionTimeout if the connection cannot be
    established; returns True on success."""
    LOGGER.info( 'Starting %s: %s',
                 self.GetServerName(),
                 self.GetCommandLine() )
    # The server's stderr is redirected to a logfile, which we expose in
    # DebugInfo and remove in _Reset (unless the user keeps logfiles).
    self._stderr_file = utils.CreateLogfile( '{}_stderr'.format(
      utils.MakeSafeFileNameString( self.GetServerName() ) ) )
    with utils.OpenForStdHandle( self._stderr_file ) as stderr:
      self._server_handle = utils.SafePopen(
        self.GetCommandLine(),
        stdin = subprocess.PIPE,
        stdout = subprocess.PIPE,
        stderr = stderr,
        env = self.GetServerEnvironment() )
    self._project_directory = self.GetProjectDirectory( request_data )
    # Communicate with the server over its stdin/stdout pipes; notifications
    # are delivered to the handler from the connection's message pump thread.
    self._connection = (
      StandardIOLanguageServerConnection(
        self._project_directory,
        lambda globs: WatchdogHandler( self, globs ),
        self._server_handle.stdin,
        self._server_handle.stdout,
        self.GetDefaultNotificationHandler() )
    )
    self._connection.Start()
    self._connection.AwaitServerConnection()
    LOGGER.info( '%s started', self.GetServerName() )
    return True
  def Shutdown( self ):
    """Stop the server: perform the LSP shutdown handshake, close the
    connection and wait (bounded) for the process to exit, then reset internal
    state. Safe to call when the server is not running."""
    with self._server_info_mutex:
      LOGGER.info( 'Shutting down %s...', self.GetServerName() )
      # Tell the connection to expect the server to disconnect
      if self._connection:
        self._connection.Stop()
      if not self.ServerIsHealthy():
        LOGGER.info( '%s is not running', self.GetServerName() )
        self._Reset()
        return
      LOGGER.info( 'Stopping %s with PID %s',
                   self.GetServerName(),
                   self._server_handle.pid )
    try:
      with self._server_info_mutex:
        self.ShutdownServer()
      # By this point, the server should have shut down and terminated. To
      # ensure that isn't blocked, we close all of our connections and wait
      # for the process to exit.
      #
      # If, after a small delay, the server has not shut down we do NOT kill
      # it; we expect that it will shut itself down eventually. This is
      # predominantly due to strange process behaviour on Windows.
      # NOTE: While waiting for the connection to close, we must _not_ hold any
      # locks (in fact, we must not hold locks that might be needed when
      # processing messages in the poll thread - i.e. notifications).
      # This is crucial, as the server closing (asynchronously) might
      # involve _other activities_ if there are messages in the queue (e.g. on
      # the socket) and we need to store/handle them in the message pump
      # (such as notifications) or even the initialize response.
      if self._connection:
        # Actually this sits around waiting for the connection thread to exit
        self._connection.Close()
      with self._server_info_mutex:
        utils.WaitUntilProcessIsTerminated( self._server_handle,
                                            timeout = 15 )
        LOGGER.info( '%s stopped', self.GetServerName() )
    except Exception:
      LOGGER.exception( 'Error while stopping %s', self.GetServerName() )
      # We leave the process running. Hopefully it will eventually die of its
      # own accord.
    with self._server_info_mutex:
      # Tidy up our internal state, even if the completer server didn't close
      # down cleanly.
      self._Reset()
  def ShutdownServer( self ):
    """Send the shutdown and possibly exit request to the server.
    Implementations must call this prior to closing the LanguageServerConnection
    or killing the downstream server."""
    # Language server protocol requires orderly shutdown of the downstream
    # server by first sending a shutdown request, and on its completion sending
    # an exit notification (which does not receive a response). Some buggy
    # servers exit on receipt of the shutdown request, so we handle that too.
    if self._ServerIsInitialized():
      request_id = self.GetConnection().NextRequestId()
      msg = lsp.Shutdown( request_id )
      try:
        self.GetConnection().GetResponse( request_id,
                                          msg,
                                          REQUEST_TIMEOUT_INITIALISE )
      except ResponseAbortedException:
        # When the language server (heinously) dies handling the shutdown
        # request, it is aborted. Just return - we're done.
        return
      except Exception:
        # Ignore other exceptions from the server and send the exit request
        # anyway
        LOGGER.exception( 'Shutdown request failed. Ignoring' )
    # Only send the exit notification if the process is still alive; some
    # servers terminate on the shutdown request itself.
    if self.ServerIsHealthy():
      self.GetConnection().SendNotification( lsp.Exit() )
    # If any threads are waiting for the initialize exchange to complete,
    # release them, as there is no chance of getting a response now.
    if ( self._initialize_response is not None and
         not self._initialize_event.is_set() ):
      self._initialize_response = None
      self._initialize_event.set()
  def _RestartServer( self, request_data, *args, **kwargs ):
    """Shut the server down, then start it again and re-run the initialize
    exchange. |args| and |kwargs| are passed through to StartServer."""
    self.Shutdown()
    self._StartAndInitializeServer( request_data, *args, **kwargs )
def _ServerIsInitialized( self ):
"""Returns True if the server is running and the initialization exchange has
completed successfully. Implementations must not issue requests until this
method returns True."""
if not self.ServerIsHealthy():
return False
if self._initialize_event.is_set():
# We already got the initialize response
return True
if self._initialize_response is None:
# We never sent the initialize response
return False
# Initialize request in progress. Will be handled asynchronously.
return False
  def ServerIsHealthy( self ):
    """Returns True if the server process is currently running."""
    return utils.ProcessIsRunning( self._server_handle )
  def ServerIsReady( self ):
    """Returns True once the server is running and has completed the
    initialize exchange, i.e. it can accept requests."""
    return self._ServerIsInitialized()
  def ShouldUseNowInner( self, request_data ):
    """Returns True if this completer should be queried for the request."""
    # We should only do _anything_ after the initialize exchange has completed.
    return ( self.ServerIsReady() and
             super().ShouldUseNowInner( request_data ) )
  def GetCodepointForCompletionRequest( self, request_data ):
    """Returns the 1-based codepoint offset on the current line at which to make
    the completion request"""
    # Delegated to the cache: when the last server response was incomplete, the
    # current column must be used rather than the start column (see __init__).
    return self._completions_cache.GetCodepointForCompletionRequest(
      request_data )
  def ComputeCandidatesInner( self, request_data, codepoint ):
    """Issue a textDocument/completion request at |codepoint| and return a
    tuple ( completions, is_incomplete ), where completions is a list of
    ycmd-format candidates. Returns ( None, False ) if the server does not
    provide completions."""
    if not self._is_completion_provider:
      return None, False
    # Make sure the server sees the current buffer contents first.
    self._UpdateServerWithFileContents( request_data )
    request_id = self.GetConnection().NextRequestId()
    msg = lsp.Completion( request_id, request_data, codepoint )
    response = self.GetConnection().GetResponse( request_id,
                                                 msg,
                                                 REQUEST_TIMEOUT_COMPLETION )
    # LSP allows the result to be either a plain CompletionItem[] or a
    # CompletionList ({ isIncomplete, items }), or null.
    result = response.get( 'result' ) or []
    if isinstance( result, list ):
      items = result
      is_incomplete = False
    else:
      items = result[ 'items' ]
      is_incomplete = result[ 'isIncomplete' ]
    # Note: _CandidatesFromCompletionItems does a lot of work on the actual
    # completion text to ensure that the returned text and start_codepoint are
    # applicable to our model of a single start column.
    #
    # Unfortunately (perhaps) we have to do this both here and in
    # DetailCandidates when resolve is required. This is because the filtering
    # should be based on ycmd's version of the insertion_text. Fortunately it's
    # likely much quicker to do the simple calculations inline rather than a
    # series of potentially many blocking server round trips.
    return ( self._CandidatesFromCompletionItems( items,
                                                  False, # don't do resolve
                                                  request_data ),
             is_incomplete )
  def _GetCandidatesFromSubclass( self, request_data ):
    """Return completion candidates, using the completions cache when it is
    still valid for this request, otherwise querying the server and updating
    the cache."""
    cache_completions = self._completions_cache.GetCompletionsIfCacheValid(
      request_data )
    # NOTE(review): an empty (falsy) cached list is treated the same as a cache
    # miss and triggers a fresh request — presumably intentional; confirm.
    if cache_completions:
      return cache_completions
    codepoint = self.GetCodepointForCompletionRequest( request_data )
    raw_completions, is_incomplete = self.ComputeCandidatesInner( request_data,
                                                                  codepoint )
    self._completions_cache.Update( request_data,
                                    raw_completions,
                                    is_incomplete )
    return raw_completions
  def DetailCandidates( self, request_data, completions ):
    """Fully resolve the (filtered) |completions| when the server supports
    completionItem/resolve; otherwise return them unchanged."""
    if not self._resolve_completion_items:
      # We already did all of the work.
      return completions
    # Note: _CandidatesFromCompletionItems does a lot of work on the actual
    # completion text to ensure that the returned text and start_codepoint are
    # applicable to our model of a single start column.
    #
    # While we did this before, this time round we will have much better data to
    # do it on, and the new calculated value is dependent on the set of filtered
    # data, possibly leading to significantly smaller overlap with existing
    # text. See the fixup algorithm for more details on that.
    # The raw LSP item was stashed in extra_data by
    # _CandidatesFromCompletionItems when resolve was deferred.
    return self._CandidatesFromCompletionItems(
      [ c[ 'extra_data' ][ 'item' ] for c in completions ],
      True, # Do a full resolve
      request_data )
  def _ResolveCompletionItem( self, item ):
    """Issue a completionItem/resolve request for |item| and update it in
    place with the server's response. On failure the item is left as-is (basic
    data) and returned."""
    try:
      resolve_id = self.GetConnection().NextRequestId()
      resolve = lsp.ResolveCompletion( resolve_id, item )
      response = self.GetConnection().GetResponse(
        resolve_id,
        resolve,
        REQUEST_TIMEOUT_COMPLETION )
      # Replace the item's contents with the resolved version, keeping the
      # same dict object (callers hold references to it).
      item.clear()
      item.update( response[ 'result' ] )
    except ResponseFailedException:
      LOGGER.exception( 'A completion item could not be resolved. Using '
                        'basic data' )
    return item
def _ShouldResolveCompletionItems( self ):
# We might not actually need to issue the resolve request if the server
# claims that it doesn't support it. However, we still might need to fix up
# the completion items.
return ( self._server_capabilities.get( 'completionProvider' ) or {} ).get(
'resolveProvider', False )
  def _CandidatesFromCompletionItems( self, items, resolve, request_data ):
    """Issue the resolve request for each completion item in |items|, then fix
    up the items such that a single start codepoint is used.

    |resolve| indicates whether completionItem/resolve should be issued for
    each item; when False but the server supports resolve, the raw item is
    stashed in extra_data for later resolution by DetailCandidates. Returns a
    list of ycmd-format completions, possibly rewriting
    request_data[ 'start_codepoint' ]."""
    #
    # Important note on the following logic:
    #
    # Language server protocol requires that clients support textEdits in
    # completion items. It imposes some restrictions on the textEdit, namely:
    #   * the edit range must cover at least the original requested position,
    #   * and that it is on a single line.
    #
    # We only get textEdits (usually) for items which were successfully
    # resolved. Otherwise we just get insertion text, which might overlap the
    # existing text.
    #
    # Importantly there is no restriction that all edits start and end at the
    # same point.
    #
    # ycmd protocol only supports a single start column, so we must post-process
    # the completion items to work out a single start column to use, as follows:
    #   * read all completion items text and start codepoint and store them
    #   * store the minimum start codepoint encountered
    #   * go back through the completion items and modify them so that they
    #     contain enough text to start from the minimum start codepoint
    #   * set the completion start codepoint to the minimum start point
    #
    # The last part involves reading the original source text and padding out
    # completion items so that they all start at the same point.
    #
    # This is neither particularly pretty nor efficient, but it is necessary.
    # Significant completions, such as imports, do not work without it in
    # jdt.ls.
    #
    completions = []
    start_codepoints = []
    unique_start_codepoints = []
    min_start_codepoint = request_data[ 'start_codepoint' ]
    # First generate all of the completion items and store their
    # start_codepoints. Then, we fix-up the completion texts to use the
    # earliest start_codepoint by borrowing text from the original line.
    for item in items:
      # '_resolved' marks items already resolved on a previous pass, so we
      # never issue the resolve request twice for the same item.
      if resolve and not item.get( '_resolved', False ):
        self._ResolveCompletionItem( item )
        item[ '_resolved' ] = True
      try:
        insertion_text, extra_data, start_codepoint = (
          _InsertionTextForItem( request_data, item ) )
      except IncompatibleCompletionException:
        LOGGER.exception( 'Ignoring incompatible completion suggestion %s',
                          item )
        continue
      if not resolve and self._resolve_completion_items:
        # Store the actual item in the extra_data area of the completion item.
        # We'll use this later to do the full resolve.
        extra_data = {} if extra_data is None else extra_data
        extra_data[ 'item' ] = item
      min_start_codepoint = min( min_start_codepoint, start_codepoint )
      # Build a ycmd-compatible completion for the text as we received it. Later
      # we might modify insertion_text should we see a lower start codepoint.
      completions.append( _CompletionItemToCompletionData(
        insertion_text,
        item,
        extra_data ) )
      start_codepoints.append( start_codepoint )
      if start_codepoint not in unique_start_codepoints:
        unique_start_codepoints.append( start_codepoint )
    if ( len( completions ) > 1 and
         len( unique_start_codepoints ) > 1 and
         min_start_codepoint != request_data[ 'start_codepoint' ] ):
      # We need to fix up the completions, go do that
      return _FixUpCompletionPrefixes( completions,
                                       start_codepoints,
                                       request_data,
                                       min_start_codepoint )
    request_data[ 'start_codepoint' ] = min_start_codepoint
    return completions
  def SignatureHelpAvailable( self ):
    """Report signature-help availability: NOT_AVAILABLE when disabled by the
    user or unsupported by the server, PENDING while the server is still
    initializing, AVAILABLE otherwise."""
    if self._signature_help_disabled:
      return responses.SignatureHelpAvailalability.NOT_AVAILABLE
    if not self.ServerIsReady():
      # Capabilities are unknown until the initialize exchange completes.
      return responses.SignatureHelpAvailalability.PENDING
    if bool( self._server_capabilities.get( 'signatureHelpProvider' ) ):
      return responses.SignatureHelpAvailalability.AVAILABLE
    else:
      return responses.SignatureHelpAvailalability.NOT_AVAILABLE
  def ComputeSignaturesInner( self, request_data ):
    """Issue a textDocument/signatureHelp request and return the (normalized)
    result dict, or {} when unavailable. Parameter labels are converted to
    [ begin, end ] byte offsets into the signature label."""
    if not self.ServerIsReady():
      return {}
    if not self._server_capabilities.get( 'signatureHelpProvider' ):
      return {}
    self._UpdateServerWithFileContents( request_data )
    request_id = self.GetConnection().NextRequestId()
    msg = lsp.SignatureHelp( request_id, request_data )
    response = self.GetConnection().GetResponse( request_id,
                                                 msg,
                                                 REQUEST_TIMEOUT_COMPLETION )
    result = response[ 'result' ]
    if result is None:
      return {}
    for sig in result[ 'signatures' ]:
      sig_label = sig[ 'label' ]
      end = 0
      if sig.get( 'parameters' ) is None:
        sig[ 'parameters' ] = []
      for arg in sig[ 'parameters' ]:
        arg_label = arg[ 'label' ]
        # NOTE(review): LSP also allows [start, end] offsets as the label;
        # this implementation only handles string labels — the assert guards
        # that assumption.
        assert not isinstance( arg_label, list )
        # Locate each parameter within the signature label (searching forward
        # from the previous parameter) and rewrite it as byte offsets.
        begin = sig[ 'label' ].find( arg_label, end )
        end = begin + len( arg_label )
        arg[ 'label' ] = [
          utils.CodepointOffsetToByteOffset( sig_label, begin ),
          utils.CodepointOffsetToByteOffset( sig_label, end ) ]
    return result
  def GetDetailedDiagnostic( self, request_data ):
    """Return a display-message response containing the message of the
    diagnostic on the current line closest to the cursor, or an explanatory
    message when there is none."""
    self._UpdateServerWithFileContents( request_data )
    # LSP lines are 0-based.
    current_line_lsp = request_data[ 'line_num' ] - 1
    current_file = request_data[ 'filepath' ]
    if not self._latest_diagnostics:
      return responses.BuildDisplayMessageResponse(
        'Diagnostics are not ready yet.' )
    with self._server_info_mutex:
      # Copy under the mutex; the message pump thread updates this map.
      diagnostics = list( self._latest_diagnostics[
        lsp.FilePathToUri( current_file ) ] )
    if not diagnostics:
      return responses.BuildDisplayMessageResponse(
        'No diagnostics for current file.' )
    # LSP columns are UTF-16 code units, so convert the cursor position.
    current_column = lsp.CodepointsToUTF16CodeUnits(
      GetFileLines( request_data, current_file )[ current_line_lsp ],
      request_data[ 'column_codepoint' ] )
    minimum_distance = None
    message = 'No diagnostics for current line.'
    for diagnostic in diagnostics:
      start = diagnostic[ 'range' ][ 'start' ]
      end = diagnostic[ 'range' ][ 'end' ]
      # Only consider diagnostics whose range includes the current line.
      if current_line_lsp < start[ 'line' ] or end[ 'line' ] < current_line_lsp:
        continue
      point = { 'line': current_line_lsp, 'character': current_column }
      distance = _DistanceOfPointToRange( point, diagnostic[ 'range' ] )
      if minimum_distance is None or distance < minimum_distance:
        message = diagnostic[ 'message' ]
        if distance == 0:
          # Cursor is inside this diagnostic's range; can't do better.
          break
        minimum_distance = distance
    return responses.BuildDisplayMessageResponse( message )
  @abc.abstractmethod
  def GetServerName( self ):
    """ A string representing a human readable name of the server. Used in
    log messages, logfile names and debug info."""
    pass # pragma: no cover
def GetServerEnvironment( self ):
""" None or a dictionary containing the environment variables. """
return None
  @abc.abstractmethod
  def GetCommandLine( self ):
    """ An override in a concrete class needs to return a list of cli arguments
    for starting the LSP server. Passed directly to utils.SafePopen."""
    pass # pragma: no cover
def AdditionalLogFiles( self ):
""" Returns the list of server logs other than stderr. """
return []
def ExtraDebugItems( self, request_data ):
""" A list of DebugInfoItems """
return []
  def DebugInfo( self, request_data ):
    """Build the debug-info response describing this completer and its server
    (name, process handle, command line, logfiles and extra items)."""
    with self._server_info_mutex:
      extras = self.CommonDebugItems() + self.ExtraDebugItems( request_data )
      logfiles = [ self._stderr_file ] + self.AdditionalLogFiles()
      server = responses.DebugInfoServer( name = self.GetServerName(),
                                          handle = self._server_handle,
                                          executable = self.GetCommandLine(),
                                          logfiles = logfiles,
                                          extras = extras )
    return responses.BuildDebugInfoResponse( name = self.GetCompleterName(),
                                             servers = [ server ] )
def GetCustomSubcommands( self ):
"""Return a list of subcommand definitions to be used in conjunction with
the subcommands detected by _DiscoverSubcommandSupport. The return is a dict
whose keys are the subcommand and whose values are either:
- a callable, as compatible with GetSubcommandsMap, or
- a dict, compatible with DEFAULT_SUBCOMMANDS_MAP including a checker and
a callable.
If there are no custom subcommands, an empty dict should be returned."""
return {}
  def GetSubcommandsMap( self ):
    """Assemble this completer's subcommand map: the default map, the common
    server-control commands, optional GetDoc/GetType when the subclass
    implements them, plus any custom subcommands - all filtered by the
    capabilities the server actually advertises."""
    commands = {}
    commands.update( DEFAULT_SUBCOMMANDS_MAP )
    commands.update( {
      'StopServer': (
        lambda self, request_data, args: self.Shutdown()
      ),
      'RestartServer': (
        lambda self, request_data, args: self._RestartServer( request_data )
      ),
    } )
    if hasattr( self, 'GetDoc' ):
      commands[ 'GetDoc' ] = (
        lambda self, request_data, args: self.GetDoc( request_data )
      )
    if hasattr( self, 'GetType' ):
      commands[ 'GetType' ] = (
        lambda self, request_data, args: self.GetType( request_data )
      )
    commands.update( self.GetCustomSubcommands() )
    # Drop entries whose capability requirements the server does not meet.
    return self._DiscoverSubcommandSupport( commands )
def _GetSubcommandProvider( self, provider_list ):
if not self._server_capabilities:
LOGGER.warning( "Can't determine subcommands: not initialized yet" )
capabilities = {}
else:
capabilities = self._server_capabilities
for providers in provider_list:
if isinstance( providers, tuple ):
if all( capabilities.get( provider ) for provider in providers ):
return providers
if capabilities.get( providers ):
return providers
return None
  def _DiscoverSubcommandSupport( self, commands ):
    """Filter |commands| against the server's capabilities. An entry whose
    handler is a list of capability names is kept only when the server
    advertises one of them (the handler is then taken from PROVIDERS_MAP);
    plain callables are always kept."""
    subcommands_map = {}
    for command, handler in commands.items():
      if isinstance( handler, list ):
        provider = self._GetSubcommandProvider( handler )
        if provider:
          LOGGER.info( 'Found %s support for command %s in %s',
                       provider,
                       command,
                       self.Language() )
          subcommands_map[ command ] = PROVIDERS_MAP[ provider ]
        else:
          LOGGER.info( 'No support for %s command in server for %s',
                       command,
                       self.Language() )
      else:
        LOGGER.info( 'Always supporting %s for %s',
                     command,
                     self.Language() )
        subcommands_map[ command ] = handler
    return subcommands_map
def DefaultSettings( self, request_data ):
return {}
  def GetSettings( self, module, request_data ):
    """Return the dict produced by the Settings function in |module| (the
    user's extra conf module), or {} when the function is absent or returns
    None."""
    if hasattr( module, 'Settings' ):
      settings = module.Settings(
        language = self.Language(),
        filename = request_data[ 'filepath' ],
        client_data = request_data[ 'extra_conf_data' ] )
      if settings is not None:
        return settings
    LOGGER.debug( 'No Settings function defined in %s', module.__file__ )
    return {}
  def _GetSettingsFromExtraConf( self, request_data ):
    """Compute self._settings from DefaultSettings merged with the user's
    extra conf (if any). Returns the directory of the local extra conf module,
    or None when there is no local extra conf."""
    # The DefaultSettings method returns only the 'language server" ('ls')
    # settings, but self._settings is a wider dict containing a 'ls' key and any
    # other keys that we might want to add (e.g. 'project_directory',
    # 'capabilities', etc.)
    merged_ls_settings = self.DefaultSettings( request_data )
    # If there is no extra-conf, the total settings are just the defaults:
    self._settings = {
      'ls': merged_ls_settings
    }
    module = extra_conf_store.ModuleForSourceFile( request_data[ 'filepath' ] )
    if module:
      # The user-defined settings may contain a 'ls' key, which override (merge
      # with) the DefaultSettings, and any other keys we specify generically for
      # all LSP-based completers (such as 'project_directory').
      user_settings = self.GetSettings( module, request_data )
      # Merge any user-supplied 'ls' settings with the defaults
      if 'ls' in user_settings:
        merged_ls_settings.update( user_settings[ 'ls' ] )
        user_settings[ 'ls' ] = merged_ls_settings
      self._settings = user_settings
      # Only return the dir if it was found in the paths; we don't want to use
      # the path of the global extra conf as a project root dir.
      if not extra_conf_store.IsGlobalExtraConfModule( module ):
        LOGGER.debug( 'Using path %s for extra_conf_dir',
                      os.path.dirname( module.__file__ ) )
        return os.path.dirname( module.__file__ )
    # No local extra conf
    return None
  def _StartAndInitializeServer( self, request_data, *args, **kwargs ):
    """Starts the server and sends the initialize request, assuming the start is
    successful. |args| and |kwargs| are passed through to the underlying call to
    StartServer. In general, completers don't need to call this as it is called
    automatically in OnFileReadyToParse, but this may be used in completer
    subcommands that require restarting the underlying server."""
    # Settings must be read before startup, as they may affect how the server
    # is launched and initialized.
    self._extra_conf_dir = self._GetSettingsFromExtraConf( request_data )
    # Only attempt to start the server once. Set this after above call as it may
    # throw an exception
    self._server_started = True
    if self.StartServer( request_data, *args, **kwargs ):
      self._SendInitialize( request_data )
  def OnFileReadyToParse( self, request_data ):
    """Handle the FileReadyToParse event: start the server on first use, run
    (or queue) the registered handlers, and return the latest diagnostics for
    the current file, if any."""
    if not self.ServerIsHealthy() and not self._server_started:
      # We have to get the settings before starting the server, as this call
      # might throw UnknownExtraConf.
      self._StartAndInitializeServer( request_data )
    if not self.ServerIsHealthy():
      return
    # If we haven't finished initializing yet, we need to queue up all functions
    # registered on the FileReadyToParse event and in particular
    # _UpdateServerWithFileContents in reverse order of registration. This
    # ensures that the server is up to date as soon as we are able to send more
    # messages. This is important because server start up can be quite slow and
    # we must not block the user, while we must keep the server synchronized.
    if not self._initialize_event.is_set():
      for handler in reversed( self._on_file_ready_to_parse_handlers ):
        self._OnInitializeComplete( partial( handler,
                                             request_data = request_data ) )
      return
    for handler in reversed( self._on_file_ready_to_parse_handlers ):
      handler( self, request_data )
    # Return the latest diagnostics that we have received.
    #
    # NOTE: We also return diagnostics asynchronously via the long-polling
    # mechanism to avoid timing issues with the servers asynchronous publication
    # of diagnostics.
    #
    # However, we _also_ return them here to refresh diagnostics after, say
    # changing the active file in the editor, or for clients not supporting the
    # polling mechanism.
    filepath = request_data[ 'filepath' ]
    uri = lsp.FilePathToUri( filepath )
    contents = GetFileLines( request_data, filepath )
    with self._server_info_mutex:
      if uri in self._latest_diagnostics:
        diagnostics = [ _BuildDiagnostic( contents, uri, diag )
                        for diag in self._latest_diagnostics[ uri ] ]
        return responses.BuildDiagnosticResponse(
          diagnostics, filepath, self.max_diagnostics_to_display )
def PollForMessagesInner( self, request_data, timeout ):
# If there are messages pending in the queue, return them immediately
messages = self._GetPendingMessages( request_data )
if messages:
return messages
# Otherwise, block until we get one or we hit the timeout.
return self._AwaitServerMessages( request_data, timeout )
  def _GetPendingMessages( self, request_data ):
    """Convert any pending notifications to messages and return them in a list.
    If there are no messages pending, returns an empty list. Returns False if an
    error occurred and no further polling should be attempted."""
    messages = []
    if not self._initialize_event.is_set():
      # The request came before we started up, there cannot be any messages
      # pending, and in any case they will be handled later.
      return messages
    try:
      # Drain the connection's notification queue without blocking.
      while True:
        if not self.GetConnection():
          # The server isn't running or something. Don't re-poll.
          return False
        notification = self.GetConnection()._notifications.get_nowait()
        message = self.ConvertNotificationToMessage( request_data,
                                                     notification )
        if message:
          messages.append( message )
    except queue.Empty:
      # We drained the queue
      pass
    return messages
  def _AwaitServerMessages( self, request_data, timeout ):
    """Block until either we receive a notification, or a timeout occurs.
    Returns one of the following:
       - a list containing a single message
       - True if a timeout occurred, and the poll should be restarted
       - False if an error occurred, and no further polling should be attempted
    """
    try:
      while True:
        if not self._initialize_event.is_set():
          # The request came before we started up, wait for startup to complete,
          # then tell the client to re-send the request. Note, we perform this
          # check on every iteration, as the server may be legitimately
          # restarted while this loop is running.
          self._initialize_event.wait( timeout=timeout )
          # If the timeout is hit waiting for the server to be ready, we return
          # False and kill the message poll.
          return self._initialize_event.is_set()
        if not self.GetConnection():
          # The server isn't running or something. Don't re-poll, as this will
          # just cause errors.
          return False
        # Blocking wait on the notification queue; queue.Empty on timeout.
        notification = self.GetConnection()._notifications.get(
          timeout = timeout )
        message = self.ConvertNotificationToMessage( request_data,
                                                     notification )
        if message:
          return [ message ]
        # Ignored notifications (message is None) loop back and keep waiting.
    except queue.Empty:
      return True
def GetDefaultNotificationHandler( self ):
"""Return a notification handler method suitable for passing to
LanguageServerConnection constructor"""
def handler( server, notification ):
self.HandleNotificationInPollThread( notification )
return handler
  def HandleNotificationInPollThread( self, notification ):
    """Called by the LanguageServerConnection in its message pump context when a
    notification message arrives."""
    if notification[ 'method' ] == 'textDocument/publishDiagnostics':
      # Some clients might not use a message poll, so we must store the
      # diagnostics and return them in OnFileReadyToParse. We also need these
      # for correct FixIt handling, as they are part of the FixIt context.
      params = notification[ 'params' ]
      # Since percent-encoded strings are not canonical, they can choose to use
      # upper case or lower case letters, also there are some characters that
      # can be encoded or not. Therefore, we convert them back and forth
      # according to our implementation to make sure they are in a canonical
      # form for access later on.
      try:
        uri = lsp.FilePathToUri( lsp.UriToFilePath( params[ 'uri' ] ) )
      except lsp.InvalidUriException:
        # Ignore diagnostics for URIs we don't recognise
        LOGGER.exception( 'Ignoring diagnostics for unrecognized URI' )
        return
      # This runs on the message pump thread, so the shared diagnostics map
      # must only be touched under the mutex.
      with self._server_info_mutex:
        self._latest_diagnostics[ uri ] = params[ 'diagnostics' ]
  def ConvertNotificationToMessage( self, request_data, notification ):
    """Convert the supplied server notification to a ycmd message. Returns None
    if the notification should be ignored.
    Implementations may override this method to handle custom notifications, but
    must always call the base implementation for unrecognized notifications."""
    if notification[ 'method' ] == 'window/showMessage':
      return responses.BuildDisplayMessageResponse(
        notification[ 'params' ][ 'message' ] )
    if notification[ 'method' ] == 'textDocument/publishDiagnostics':
      params = notification[ 'params' ]
      uri = params[ 'uri' ]
      try:
        filepath = lsp.UriToFilePath( uri )
      except lsp.InvalidUriException:
        LOGGER.exception( 'Ignoring diagnostics for unrecognized URI' )
        return None
      with self._server_info_mutex:
        # Prefer the contents the server has been told about; fall back to
        # the contents supplied in the request.
        if filepath in self._server_file_state:
          contents = utils.SplitLines(
            self._server_file_state[ filepath ].contents )
        else:
          contents = GetFileLines( request_data, filepath )
      diagnostics = [ _BuildDiagnostic( contents, uri, x )
                      for x in params[ 'diagnostics' ] ]
      return {
        'diagnostics': responses.BuildDiagnosticResponse(
          diagnostics, filepath, self.max_diagnostics_to_display ),
        'filepath': filepath
      }
    if notification[ 'method' ] == 'window/logMessage':
      # Map the LSP MessageType value to a Python logging level.
      log_level = [
        None, # 1-based enum from LSP
        logging.ERROR,
        logging.WARNING,
        logging.INFO,
        logging.DEBUG,
      ]
      params = notification[ 'params' ]
      LOGGER.log( log_level[ int( params[ 'type' ] ) ],
                  'Server reported: %s',
                  params[ 'message' ] )
    # Unrecognized notifications are ignored.
    return None
def _AnySupportedFileType( self, file_types ):
for supported in self.SupportedFiletypes():
if supported in file_types:
return True
return False
  def _UpdateServerWithFileContents( self, request_data ):
    """Update the server with the current contents of all open buffers, and
    close any buffers no longer open.

    This method should be called frequently and in any event before a
    synchronous operation."""
    # All server-file-state manipulation happens under the mutex; see the
    # comments in __init__.
    with self._server_info_mutex:
      self._UpdateDirtyFilesUnderLock( request_data )
      files_to_purge = self._UpdateSavedFilesUnderLock( request_data )
      self._PurgeMissingFilesUnderLock( files_to_purge )
  def _UpdateDirtyFilesUnderLock( self, request_data ):
    """Send didOpen/didChange for every (supported) dirty buffer in the
    request. Must be called with self._server_info_mutex held."""
    for file_name, file_data in request_data[ 'file_data' ].items():
      if not self._AnySupportedFileType( file_data[ 'filetypes' ] ):
        LOGGER.debug( 'Not updating file %s, it is not a supported filetype: '
                      '%s not in %s',
                      file_name,
                      file_data[ 'filetypes' ],
                      self.SupportedFiletypes() )
        continue
      file_state = self._server_file_state[ file_name ]
      # The file state decides whether an open or change notification (or
      # nothing) is needed for these contents.
      action = file_state.GetDirtyFileAction( file_data[ 'contents' ] )
      LOGGER.debug( 'Refreshing file %s: State is %s/action %s',
                    file_name,
                    file_state.state,
                    action )
      if action == lsp.ServerFileState.OPEN_FILE:
        msg = lsp.DidOpenTextDocument( file_state,
                                       file_data[ 'filetypes' ],
                                       file_data[ 'contents' ] )
        self.GetConnection().SendNotification( msg )
      elif action == lsp.ServerFileState.CHANGE_FILE:
        # FIXME: DidChangeTextDocument doesn't actually do anything
        # different from DidOpenTextDocument other than send the right
        # message, because we don't actually have a mechanism for generating
        # the diffs. This isn't strictly necessary, but might lead to
        # performance problems.
        msg = lsp.DidChangeTextDocument( file_state, file_data[ 'contents' ] )
        self.GetConnection().SendNotification( msg )
  def _UpdateSavedFilesUnderLock( self, request_data ):
    """For files the server knows about but which are not in the request
    (i.e. open but not dirty), refresh the server from the on-disk contents.
    Returns the list of files that no longer exist and should be purged. Must
    be called with self._server_info_mutex held."""
    files_to_purge = []
    for file_name, file_state in self._server_file_state.items():
      if file_name in request_data[ 'file_data' ]:
        continue
      # We also need to tell the server the contents of any files we have said
      # are open, but are not 'dirty' in the editor. This is because after
      # sending a didOpen notification, we own the contents of the file.
      #
      # So for any file that is in the server map, and open, but not supplied in
      # the request, we check to see if its on-disk contents match the latest in
      # the server. If they don't, we send an update.
      #
      # FIXME: This is really inefficient currently, as it reads the entire file
      # on every update. It might actually be better to close files which have
      # been saved and are no longer "dirty", though that would likely be less
      # efficient for downstream servers which cache e.g. AST.
      try:
        contents = GetFileContents( request_data, file_name )
      except IOError:
        LOGGER.exception( 'Error getting contents for open file: %s',
                          file_name )
        # The file no longer exists (it might have been a temporary file name)
        # or it is no longer accessible, so we should state that it is closed.
        # If it were still open it would have been in the request_data.
        #
        # We have to do this in a separate loop because we can't change
        # self._server_file_state while iterating it.
        files_to_purge.append( file_name )
        continue
      action = file_state.GetSavedFileAction( contents )
      if action == lsp.ServerFileState.CHANGE_FILE:
        msg = lsp.DidChangeTextDocument( file_state, contents )
        self.GetConnection().SendNotification( msg )
    return files_to_purge
def _PurgeMissingFilesUnderLock( self, files_to_purge ):
# ycmd clients only send buffers which have changed, and are required to
# send BufferUnload autocommand when files are closed.
for file_name in files_to_purge:
self._PurgeFileFromServer( file_name )
def OnFileSave( self, request_data ):
if not self.ServerIsReady():
return
if 'textDocumentSync' in self._server_capabilities:
sync = self._server_capabilities[ 'textDocumentSync' ]
if isinstance( sync, dict ) and sync.get( 'save' ) not in [ None, False ]:
save = sync[ 'save' ]
file_name = request_data[ 'filepath' ]
contents = None
if isinstance( save, dict ) and save.get( 'includeText' ):
contents = request_data[ 'file_data' ][ file_name ][ 'contents' ]
file_state = self._server_file_state[ file_name ]
msg = lsp.DidSaveTextDocument( file_state, contents )
self.GetConnection().SendNotification( msg )
def OnBufferUnload( self, request_data ):
if not self.ServerIsHealthy():
return
# If we haven't finished initializing yet, we need to queue up a call to
# _PurgeFileFromServer. This ensures that the server is up to date
# as soon as we are able to send more messages. This is important because
# server start up can be quite slow and we must not block the user, while we
# must keep the server synchronized.
if not self._initialize_event.is_set():
self._OnInitializeComplete(
lambda self: self._PurgeFileFromServer( request_data[ 'filepath' ] ) )
return
self._PurgeFileFromServer( request_data[ 'filepath' ] )
def _PurgeFileFromServer( self, file_path ):
file_state = self._server_file_state[ file_path ]
action = file_state.GetFileCloseAction()
if action == lsp.ServerFileState.CLOSE_FILE:
msg = lsp.DidCloseTextDocument( file_state )
self.GetConnection().SendNotification( msg )
del self._server_file_state[ file_state.filename ]
def GetProjectRootFiles( self ):
"""Returns a list of files that indicate the root of the project.
It should be easier to override just this method than the whole
GetProjectDirectory."""
return []
  def GetProjectDirectory( self, request_data ):
    """Return the directory in which the server should operate. Language server
    protocol and most servers have a concept of a 'project directory'. Where a
    concrete completer can detect this better, it should override this method,
    but otherwise, we default as follows:
      - If the user specified 'project_directory' in their extra conf
        'Settings', use that.
      - try to find files from GetProjectRootFiles and use the
        first directory from there
      - if there's an extra_conf file, use that directory
      - otherwise if we know the client's cwd, use that
      - otherwise use the directory of the file that we just opened
    Note: None of these are ideal. Ycmd doesn't really have a notion of project
    directory and therefore neither do any of our clients.

    NOTE: Must be called _after_ _GetSettingsFromExtraConf, as it uses
    self._settings and self._extra_conf_dir
    """
    if 'project_directory' in self._settings:
      # Relative paths are resolved against the extra conf's directory.
      # NOTE(review): 'AbsoluatePath' looks like a typo'd helper name -
      # confirm this is the actual spelling in utils before renaming.
      return utils.AbsoluatePath( self._settings[ 'project_directory' ],
                                  self._extra_conf_dir )
    project_root_files = self.GetProjectRootFiles()
    if project_root_files:
      # Walk up from the file's directory; the first ancestor containing any
      # marker file wins.
      for folder in utils.PathsToAllParentFolders( request_data[ 'filepath' ] ):
        for root_file in project_root_files:
          if os.path.isfile( os.path.join( folder, root_file ) ):
            return folder
    if self._extra_conf_dir:
      return self._extra_conf_dir
    if 'working_dir' in request_data:
      return request_data[ 'working_dir' ]
    return os.path.dirname( request_data[ 'filepath' ] )
  def _SendInitialize( self, request_data ):
    """Sends the initialize request asynchronously.

    This must be called immediately after establishing the connection with the
    language server. Implementations must not issue further requests to the
    server until the initialize exchange has completed. This can be detected by
    calling this class's implementation of _ServerIsInitialized.
    _GetSettingsFromExtraConf must be called before calling this method, as
    this method relies on self._extra_conf_dir.
    It is called before starting the server in OnFileReadyToParse."""
    with self._server_info_mutex:
      # Only one initialize exchange may be in flight at a time.
      assert not self._initialize_response
      request_id = self.GetConnection().NextRequestId()
      # FIXME: According to the discussion on
      # https://github.com/Microsoft/language-server-protocol/issues/567
      # the settings on the Initialize request are somehow subtly different from
      # the settings supplied in didChangeConfiguration, though it's not exactly
      # clear how/where that is specified.
      msg = lsp.Initialize( request_id,
                            self._project_directory,
                            self._settings.get( 'ls', {} ) )
      def response_handler( response, message ):
        # A None |message| means no response arrived; there is nothing to
        # handle in that case.
        if message is None:
          return
        self._HandleInitializeInPollThread( message )
      self._initialize_response = self.GetConnection().GetResponseAsync(
        request_id,
        msg,
        response_handler )
def GetTriggerCharacters( self, server_trigger_characters ):
"""Given the server trigger characters supplied in the initialize response,
returns the trigger characters to merge with the ycmd-defined ones. By
default, all server trigger characters are merged in. Note this might not be
appropriate in all cases as ycmd's own triggering mechanism is more
sophisticated (regex based) than LSP's (single character). If the
server-supplied single-character triggers are not useful, override this
method to return an empty list or None."""
return server_trigger_characters
def GetSignatureTriggerCharacters( self, server_trigger_characters ):
"""Same as _GetTriggerCharacters but for signature help."""
return server_trigger_characters
  def _HandleInitializeInPollThread( self, response ):
    """Called within the context of the LanguageServerConnection's message pump
    when the initialize request receives a response.

    Records the server's capabilities, works out the document sync type and
    trigger characters, sends the 'initialized' and didChangeConfiguration
    notifications, and finally releases anything queued up waiting for the
    initialize exchange to finish."""
    with self._server_info_mutex:
      self._server_capabilities = response[ 'result' ][ 'capabilities' ]
      self._resolve_completion_items = self._ShouldResolveCompletionItems()
      self._is_completion_provider = (
          'completionProvider' in self._server_capabilities )
      if 'textDocumentSync' in self._server_capabilities:
        sync = self._server_capabilities[ 'textDocumentSync' ]
        # Index in this list corresponds to the protocol's numeric sync kind.
        SYNC_TYPE = [
          'None',
          'Full',
          'Incremental'
        ]
        # The sync type can either be a number or an object. Because it's
        # important to make things difficult.
        if isinstance( sync, dict ):
          if 'change' in sync:
            sync = sync[ 'change' ]
          else:
            # No 'change' member; default to 1 ('Full').
            sync = 1
        self._sync_type = SYNC_TYPE[ sync ]
        LOGGER.info( 'Language server requires sync type of %s',
                     self._sync_type )
      # Update our semantic triggers if they are supplied by the server
      if self.completion_triggers is not None:
        server_trigger_characters = (
          ( self._server_capabilities.get( 'completionProvider' ) or {} )
            .get( 'triggerCharacters' ) or []
        )
        LOGGER.debug( '%s: Server declares trigger characters: %s',
                      self.Language(),
                      server_trigger_characters )
        # Let the concrete completer filter/veto the server's triggers.
        trigger_characters = self.GetTriggerCharacters(
          server_trigger_characters )
        if trigger_characters:
          LOGGER.info( '%s: Using trigger characters for semantic triggers: %s',
                       self.Language(),
                       ','.join( trigger_characters ) )
          self.completion_triggers.SetServerSemanticTriggers(
            trigger_characters )
      if self._signature_triggers is not None:
        server_trigger_characters = (
          ( self._server_capabilities.get( 'signatureHelpProvider' ) or {} )
            .get( 'triggerCharacters' ) or []
        )
        LOGGER.debug( '%s: Server declares signature trigger characters: %s',
                      self.Language(),
                      server_trigger_characters )
        trigger_characters = self.GetSignatureTriggerCharacters(
          server_trigger_characters )
        if trigger_characters:
          LOGGER.info( '%s: Using characters for signature triggers: %s',
                       self.Language(),
                       ','.join( trigger_characters ) )
          self.SetSignatureHelpTriggers( trigger_characters )
      # We must notify the server that we received the initialize response (for
      # no apparent reason, other than that's what the protocol says).
      self.GetConnection().SendNotification( lsp.Initialized() )
      # Some language servers require the use of didChangeConfiguration event,
      # even though it is not clear in the specification that it is mandatory,
      # nor when it should be sent.  VSCode sends it immediately after
      # initialized notification, so we do the same.
      # FIXME: According to
      # https://github.com/Microsoft/language-server-protocol/issues/567 the
      # configuration should be send in response to a workspace/configuration
      # request?
      self.GetConnection().SendNotification(
        lsp.DidChangeConfiguration( self._settings.get( 'ls', {} ) ) )
      # Notify the other threads that we have completed the initialize exchange.
      self._initialize_response = None
      self._initialize_event.set()
    # Fire any events that are pending on the completion of the initialize
    # exchange. Typically, this will be calls to _UpdateServerWithFileContents
    # or something that occurred while we were waiting.
    for handler in self._on_initialize_complete_handlers:
      handler( self )
    self._on_initialize_complete_handlers = []
def _OnInitializeComplete( self, handler ):
"""Register a function to be called when the initialize exchange completes.
The function |handler| will be called on successful completion of the
initialize exchange with a single argument |self|, which is the |self|
passed to this method.
If the server is shut down or reset, the callback is not called."""
self._on_initialize_complete_handlers.append( handler )
def RegisterOnFileReadyToParse( self, handler ):
self._on_file_ready_to_parse_handlers.append( handler )
def GetHoverResponse( self, request_data ):
"""Return the raw LSP response to the hover request for the supplied
context. Implementations can use this for e.g. GetDoc and GetType requests,
depending on the particular server response."""
if not self._ServerIsInitialized():
raise RuntimeError( 'Server is initializing. Please wait.' )
self._UpdateServerWithFileContents( request_data )
request_id = self.GetConnection().NextRequestId()
response = self.GetConnection().GetResponse(
request_id,
lsp.Hover( request_id, request_data ),
REQUEST_TIMEOUT_COMMAND )
result = response[ 'result' ]
if result:
return result[ 'contents' ]
raise NoHoverInfoException( NO_HOVER_INFORMATION )
def _GoToRequest( self, request_data, handler ):
request_id = self.GetConnection().NextRequestId()
result = self.GetConnection().GetResponse(
request_id,
getattr( lsp, handler )( request_id, request_data ),
REQUEST_TIMEOUT_COMMAND )[ 'result' ]
if not result:
raise RuntimeError( 'Cannot jump to location' )
if not isinstance( result, list ):
return [ result ]
return result
  def GoTo( self, request_data, handlers ):
    """Issues a GoTo request for each handler in |handlers| until it returns
    multiple locations or a location the cursor does not belong since the user
    wants to jump somewhere else. If that's the last handler, the location is
    returned anyway."""
    if not self.ServerIsReady():
      raise RuntimeError( 'Server is initializing. Please wait.' )
    self._UpdateServerWithFileContents( request_data )
    if len( handlers ) == 1:
      # Only one handler: no fallback chain to walk.
      result = self._GoToRequest( request_data, handlers[ 0 ] )
    else:
      for handler in handlers:
        result = self._GoToRequest( request_data, handler )
        # Stop at the first handler that moves the user somewhere: either
        # multiple candidate locations, or a single location away from the
        # cursor. Otherwise fall through to the next handler.
        if len( result ) > 1 or not _CursorInsideLocation( request_data,
                                                           result[ 0 ] ):
          break
    return _LocationListToGoTo( request_data, result )
  def GetCodeActions( self, request_data, args ):
    """Performs the codeAction request and returns the result as a FixIt
    response.

    The range sent to the server is chosen as follows:
      - the explicit range from |request_data|, when one was supplied;
      - otherwise, the range of the first diagnostic whose lines cover the
        cursor line (passing the matched diagnostics along too);
      - otherwise, the whole of the current line."""
    if not self.ServerIsReady():
      raise RuntimeError( 'Server is initializing. Please wait.' )
    self._UpdateServerWithFileContents( request_data )
    # LSP line numbers are 0-based.
    line_num_ls = request_data[ 'line_num' ] - 1
    request_id = self.GetConnection().NextRequestId()
    if 'range' in request_data:
      code_actions = self.GetConnection().GetResponse(
        request_id,
        lsp.CodeAction( request_id,
                        request_data,
                        lsp.Range( request_data ),
                        [] ),
        REQUEST_TIMEOUT_COMMAND )
    else:
      def WithinRange( diag ):
        # True when the cursor line falls within the diagnostic's lines
        # (columns are deliberately ignored).
        start = diag[ 'range' ][ 'start' ]
        end = diag[ 'range' ][ 'end' ]
        if line_num_ls < start[ 'line' ] or line_num_ls > end[ 'line' ]:
          return False
        return True
      # Copy under the lock; the message pump may mutate the diagnostics.
      with self._server_info_mutex:
        file_diagnostics = list( self._latest_diagnostics[
            lsp.FilePathToUri( request_data[ 'filepath' ] ) ] )
      matched_diagnostics = [
        d for d in file_diagnostics if WithinRange( d )
      ]
      if matched_diagnostics:
        code_actions = self.GetConnection().GetResponse(
          request_id,
          lsp.CodeAction( request_id,
                          request_data,
                          matched_diagnostics[ 0 ][ 'range' ],
                          matched_diagnostics ),
          REQUEST_TIMEOUT_COMMAND )
      else:
        line_value = request_data[ 'line_value' ]
        code_actions = self.GetConnection().GetResponse(
          request_id,
          lsp.CodeAction(
            request_id,
            request_data,
            # Use the whole line
            {
              'start': {
                'line': line_num_ls,
                'character': 0,
              },
              'end': {
                'line': line_num_ls,
                'character': lsp.CodepointsToUTF16CodeUnits(
                  line_value,
                  len( line_value ) + 1 ) - 1,
              }
            },
            [] ),
          REQUEST_TIMEOUT_COMMAND )
    return self.CodeActionResponseToFixIts( request_data,
                                            code_actions[ 'result' ] )
  def CodeActionResponseToFixIts( self, request_data, code_actions ):
    """Convert the server's codeAction result list into a ycmd FixIt
    response. Each entry is either a WorkspaceEdit literal, a CodeAction
    wrapping a Command, or a bare Command; the latter two become unresolved
    fixits to be resolved on demand."""
    if code_actions is None:
      return responses.BuildFixItResponse( [] )
    fixits = []
    for code_action in code_actions:
      if 'edit' in code_action:
        # TODO: Start supporting a mix of WorkspaceEdits and Commands
        # once there's a need for such
        assert 'command' not in code_action
        # This is a WorkspaceEdit literal
        fixits.append( self.CodeActionLiteralToFixIt( request_data,
                                                      code_action ) )
        continue
      # Either a CodeAction or a Command
      assert 'command' in code_action
      action_command = code_action[ 'command' ]
      if isinstance( action_command, dict ):
        # CodeAction with a 'command' rather than 'edit'
        fixits.append( self.CodeActionCommandToFixIt( request_data,
                                                      code_action ) )
        continue
      # It is a Command
      fixits.append( self.CommandToFixIt( request_data, code_action ) )
    # Show a list of actions to the user to select which one to apply.
    # This is (probably) a more common workflow for "code action".
    result = [ r for r in fixits if r ]
    if len( result ) == 1:
      fixit = result[ 0 ]
      if hasattr( fixit, 'resolve' ):
        # Be nice and resolve the fixit to save on roundtrips
        unresolved_fixit = {
          'command': fixit.command,
          'text': fixit.text,
          'resolve': fixit.resolve
        }
        return self._ResolveFixit( request_data, unresolved_fixit )
    return responses.BuildFixItResponse( result )
def CodeActionLiteralToFixIt( self, request_data, code_action_literal ):
return WorkspaceEditToFixIt( request_data,
code_action_literal[ 'edit' ],
code_action_literal[ 'title' ] )
def CodeActionCommandToFixIt( self, request_data, code_action_command ):
command = code_action_command[ 'command' ]
return self.CommandToFixIt( request_data, command )
def CommandToFixIt( self, request_data, command ):
return responses.UnresolvedFixIt( command, command[ 'title' ] )
def RefactorRename( self, request_data, args ):
"""Issues the rename request and returns the result as a FixIt response."""
if not self.ServerIsReady():
raise RuntimeError( 'Server is initializing. Please wait.' )
if len( args ) != 1:
raise ValueError( 'Please specify a new name to rename it to.\n'
'Usage: RefactorRename <new name>' )
self._UpdateServerWithFileContents( request_data )
new_name = args[ 0 ]
request_id = self.GetConnection().NextRequestId()
response = self.GetConnection().GetResponse(
request_id,
lsp.Rename( request_id, request_data, new_name ),
REQUEST_TIMEOUT_COMMAND )
fixit = WorkspaceEditToFixIt( request_data, response[ 'result' ] )
if not fixit:
raise RuntimeError( 'Cannot rename the symbol under cursor.' )
return responses.BuildFixItResponse( [ fixit ] )
def AdditionalFormattingOptions( self, request_data ):
# While we have the settings in self._settings[ 'formatting_options' ], we
# actually run Settings again here, which allows users to have different
# formatting options for different files etc. if they should decide that's
# appropriate.
module = extra_conf_store.ModuleForSourceFile( request_data[ 'filepath' ] )
try:
settings = self.GetSettings( module, request_data )
return settings.get( 'formatting_options', {} )
except AttributeError:
return {}
  def Format( self, request_data ):
    """Issues the formatting or rangeFormatting request (depending on the
    presence of a range) and returns the result as a FixIt response.

    User- and file-specific formatting options are merged into the request
    options before sending. Raises RuntimeError while the server is still
    initializing."""
    if not self.ServerIsReady():
      raise RuntimeError( 'Server is initializing. Please wait.' )
    self._UpdateServerWithFileContents( request_data )
    request_data[ 'options' ].update(
      self.AdditionalFormattingOptions( request_data ) )
    request_id = self.GetConnection().NextRequestId()
    if 'range' in request_data:
      message = lsp.RangeFormatting( request_id, request_data )
    else:
      message = lsp.Formatting( request_id, request_data )
    response = self.GetConnection().GetResponse( request_id,
                                                 message,
                                                 REQUEST_TIMEOUT_COMMAND )
    filepath = request_data[ 'filepath' ]
    contents = GetFileLines( request_data, filepath )
    # Each returned TextEdit becomes one FixItChunk; a null result means no
    # edits.
    chunks = [ responses.FixItChunk( text_edit[ 'newText' ],
                                     _BuildRange( contents,
                                                  filepath,
                                                  text_edit[ 'range' ] ) )
               for text_edit in response[ 'result' ] or [] ]
    return responses.BuildFixItResponse( [ responses.FixIt(
      responses.Location( request_data[ 'line_num' ],
                          request_data[ 'column_num' ],
                          request_data[ 'filepath' ] ),
      chunks ) ] )
  def _ResolveFixit( self, request_data, fixit ):
    """Resolve an unresolved |fixit| by executing its command and collecting
    the workspace edit the server sends back, returning a ycmd FixIt
    response. A fixit that needs no resolution is returned unchanged."""
    if not fixit[ 'resolve' ]:
      return { 'fixits': [ fixit ] }
    unresolved_fixit = fixit[ 'command' ]
    collector = EditCollector()
    # While the command runs, capture any workspace/applyEdit requests the
    # server issues instead of applying them.
    with self.GetConnection().CollectApplyEdits( collector ):
      self.GetCommandResponse(
        request_data,
        unresolved_fixit[ 'command' ],
        unresolved_fixit[ 'arguments' ] )
    # Return a ycmd fixit
    response = collector.requests
    assert len( response ) == 1
    fixit = WorkspaceEditToFixIt(
      request_data,
      response[ 0 ][ 'edit' ],
      unresolved_fixit[ 'title' ] )
    return responses.BuildFixItResponse( [ fixit ] )
def ResolveFixit( self, request_data ):
return self._ResolveFixit( request_data, request_data[ 'fixit' ] )
  def ExecuteCommand( self, request_data, args ):
    """Execute the server command args[ 0 ] with arguments args[ 1: ].

    Returns either whatever HandleServerCommandResponse produces, a FixIt
    response built from any edits the server asked us to apply, or (failing
    both) the raw command response dumped as JSON. Raises ValueError when no
    command was given."""
    if not args:
      raise ValueError( 'Must specify a command to execute' )
    # We don't have any actual knowledge of the responses here. Unfortunately,
    # the LSP "commands" require client/server specific understanding of the
    # commands.
    collector = EditCollector()
    # Capture workspace/applyEdit requests issued while the command runs.
    with self.GetConnection().CollectApplyEdits( collector ):
      command_response = self.GetCommandResponse( request_data,
                                                  args[ 0 ],
                                                  args[ 1: ] )
    edits = collector.requests
    response = self.HandleServerCommandResponse( request_data,
                                                 edits,
                                                 command_response )
    if response is not None:
      return response
    if len( edits ):
      fixits = [ WorkspaceEditToFixIt(
        request_data,
        e[ 'edit' ],
        '' ) for e in edits ]
      return responses.BuildFixItResponse( fixits )
    return responses.BuildDetailedInfoResponse( json.dumps( command_response,
                                                            indent = 2 ) )
def GetCommandResponse( self, request_data, command, arguments ):
if not self.ServerIsReady():
raise RuntimeError( 'Server is initializing. Please wait.' )
self._UpdateServerWithFileContents( request_data )
request_id = self.GetConnection().NextRequestId()
message = lsp.ExecuteCommand( request_id, command, arguments )
response = self.GetConnection().GetResponse( request_id,
message,
REQUEST_TIMEOUT_COMMAND )
return response[ 'result' ]
def CommonDebugItems( self ):
def ServerStateDescription():
if not self.ServerIsHealthy():
return 'Dead'
if not self._ServerIsInitialized():
return 'Starting...'
return 'Initialized'
return [ responses.DebugInfoItem( 'Server State',
ServerStateDescription() ),
responses.DebugInfoItem( 'Project Directory',
self._project_directory ),
responses.DebugInfoItem(
'Settings',
json.dumps( self._settings.get( 'ls', {} ),
indent = 2,
sort_keys = True ) ) ]
def _DistanceOfPointToRange( point, range ):
"""Calculate the distance from a point to a range.
Assumes point is covered by lines in the range.
Returns 0 if point is already inside range. """
start = range[ 'start' ]
end = range[ 'end' ]
# Single-line range.
if start[ 'line' ] == end[ 'line' ]:
# 0 if point is within range, otherwise distance from start/end.
return max( 0, point[ 'character' ] - end[ 'character' ],
start[ 'character' ] - point[ 'character' ] )
if start[ 'line' ] == point[ 'line' ]:
return max( 0, start[ 'character' ] - point[ 'character' ] )
if end[ 'line' ] == point[ 'line' ]:
return max( 0, point[ 'character' ] - end[ 'character' ] )
# If not on the first or last line, then point is within range for sure.
return 0
def _CompletionItemToCompletionData( insertion_text, item, fixits ):
  """Build a ycmd completion entry from the LSP completion |item|."""
  # Since we send completionItemKind capabilities, we guarantee to handle
  # kind values outside our value set by falling back to a default.
  kind_index = item.get( 'kind' ) or 0
  try:
    kind = lsp.ITEM_KIND[ kind_index ]
  except IndexError:
    # Unsupported kind; fall back to the entry for None.
    kind = lsp.ITEM_KIND[ 0 ]
  documentation = item.get( 'documentation' ) or ''
  if isinstance( documentation, dict ):
    # Structured documentation; take the raw string value.
    documentation = documentation[ 'value' ]
  return responses.BuildCompletionData(
    insertion_text,
    extra_menu_info = item.get( 'detail' ),
    detailed_info = item[ 'label' ] + '\n\n' + documentation,
    menu_text = item[ 'label' ],
    kind = kind,
    extra_data = fixits )
def _FixUpCompletionPrefixes( completions,
start_codepoints,
request_data,
min_start_codepoint ):
"""Fix up the insertion texts so they share the same start_codepoint by
borrowing text from the source."""
line = request_data[ 'line_value' ]
for completion, start_codepoint in zip( completions, start_codepoints ):
to_borrow = start_codepoint - min_start_codepoint
if to_borrow > 0:
borrow = line[ start_codepoint - to_borrow - 1 : start_codepoint - 1 ]
new_insertion_text = borrow + completion[ 'insertion_text' ]
completion[ 'insertion_text' ] = new_insertion_text
# Finally, remove any common prefix
common_prefix_len = len( os.path.commonprefix(
[ c[ 'insertion_text' ] for c in completions ] ) )
for completion in completions:
completion[ 'insertion_text' ] = completion[ 'insertion_text' ][
common_prefix_len : ]
# The start column is the earliest start point that we fixed up plus the
# length of the common prefix that we subsequently removed.
#
# Phew! That was hard work.
request_data[ 'start_codepoint' ] = min_start_codepoint + common_prefix_len
return completions
def _InsertionTextForItem( request_data, item ):
  """Determines the insertion text for the completion item |item|, and any
  additional FixIts that need to be applied when selecting it.

  Returns a tuple (
     - insertion_text   = the text to insert
     - fixits           = ycmd fixit which needs to be applied additionally
                          when selecting this completion
     - start_codepoint  = the start column at which the text should be
                          inserted
  )"""
  # We do not support completion types of "Snippet". This is implicit in that we
  # don't say it is a "capability" in the initialize request.
  # Abort this request if the server is buggy and ignores us.
  assert lsp.INSERT_TEXT_FORMAT[
    item.get( 'insertTextFormat' ) or 1 ] == 'PlainText'
  fixits = None
  start_codepoint = request_data[ 'start_codepoint' ]
  # We will always have one of insertText or label
  if 'insertText' in item and item[ 'insertText' ]:
    insertion_text = item[ 'insertText' ]
  else:
    insertion_text = item[ 'label' ]
  additional_text_edits = []
  # Per the protocol, textEdit takes precedence over insertText, and must be
  # on the same line (and containing) the originally requested position. These
  # are a pain, and require fixing up later in some cases, as most of our
  # clients won't be able to apply arbitrary edits (only 'completion', as
  # opposed to 'content assist').
  if 'textEdit' in item and item[ 'textEdit' ]:
    text_edit = item[ 'textEdit' ]
    start_codepoint = _GetCompletionItemStartCodepointOrReject( text_edit,
                                                                request_data )
    insertion_text = text_edit[ 'newText' ]
    if '\n' in insertion_text:
      # jdt.ls can return completions which generate code, such as
      # getters/setters and entire anonymous classes.
      #
      # In order to support this we would need to do something like:
      #  - invent some insertion_text based on label/insertText (or perhaps
      #    '<snippet>'
      #  - insert a textEdit in additionalTextEdits which deletes this
      #    insertion
      #  - or perhaps just modify this textEdit to undo that change?
      #  - or perhaps somehow support insertion_text of '' (this doesn't work
      #    because of filtering/sorting, etc.).
      #  - insert this textEdit in additionalTextEdits
      #
      # These textEdits would need a lot of fixing up and is currently out of
      # scope.
      #
      # These sorts of completions aren't really in the spirit of ycmd at the
      # moment anyway. So for now, we just ignore this candidate.
      raise IncompatibleCompletionException( insertion_text )
  else:
    # Calculate the start codepoint based on the overlapping text in the
    # insertion text and the existing text. This is the behavior of Visual
    # Studio Code and therefore de-facto undocumented required behavior of LSP
    # clients.
    start_codepoint -= FindOverlapLength( request_data[ 'prefix' ],
                                          insertion_text )
  additional_text_edits.extend( item.get( 'additionalTextEdits' ) or [] )
  if additional_text_edits:
    # Additional edits (e.g. adding an import) become a FixIt attached to the
    # completion entry, anchored at the first chunk's start location.
    filepath = request_data[ 'filepath' ]
    contents = GetFileLines( request_data, filepath )
    chunks = [ responses.FixItChunk( e[ 'newText' ],
                                     _BuildRange( contents,
                                                  filepath,
                                                  e[ 'range' ] ) )
               for e in additional_text_edits ]
    fixits = responses.BuildFixItResponse(
      [ responses.FixIt( chunks[ 0 ].range.start_, chunks ) ] )
  return insertion_text, fixits, start_codepoint
def FindOverlapLength( line_value, insertion_text ):
  """Return the length of the longest suffix of |line_value| which is a
  prefix of |insertion_text|.

  Example: for line_value 'import com.' and insertion_text
  'com.youcompleteme.test' the overlap is 'com.', so the result is 4.
  Algorithm credit: https://neil.fraser.name/news/2010/11/04/"""
  len_line = len( line_value )
  len_text = len( insertion_text )
  # Nothing can overlap with an empty string.
  if len_line == 0 or len_text == 0:
    return 0
  # Only the trailing len_text characters of line_value and the leading
  # len_line characters of insertion_text can possibly overlap, so truncate
  # both to the same length.
  if len_line > len_text:
    line_value = line_value[ -len_text : ]
  elif len_text > len_line:
    insertion_text = insertion_text[ : len_line ]
  # Total overlap is now a plain equality check.
  if insertion_text == line_value:
    return min( len_line, len_text )
  best = 0
  overlap = 1
  while True:
    # Look for the length-|overlap| suffix of line_value inside
    # insertion_text.
    position = insertion_text.find( line_value[ -overlap : ] )
    if position == -1:
      # Not found: no longer suffix can match either, so we are done.
      return best
    # All the characters up to where the suffix was found would have to
    # overlap too; check whether they actually do.
    overlap += position
    if line_value[ -overlap : ] == insertion_text[ : overlap ]:
      # They do - this is the longest overlap seen so far.
      best = overlap
    # Try one character more.
    overlap += 1
def _GetCompletionItemStartCodepointOrReject( text_edit, request_data ):
  """Return the 1-based start codepoint of |text_edit|, raising
  IncompatibleCompletionException for edits we cannot apply: those spanning
  multiple lines, or starting after the completion start position."""
  edit_range = text_edit[ 'range' ]
  edit_start = edit_range[ 'start' ]
  # Conservatively reject candidates that breach the protocol.
  if edit_start[ 'line' ] != edit_range[ 'end' ][ 'line' ]:
    raise IncompatibleCompletionException(
      "The TextEdit '{0}' spans multiple lines".format(
        text_edit[ 'newText' ] ) )
  file_contents = GetFileLines( request_data, request_data[ 'filepath' ] )
  start_codepoint = lsp.UTF16CodeUnitsToCodepoints(
    file_contents[ edit_start[ 'line' ] ],
    edit_start[ 'character' ] + 1 )
  if start_codepoint > request_data[ 'start_codepoint' ]:
    raise IncompatibleCompletionException(
      "The TextEdit '{0}' starts after the start position".format(
        text_edit[ 'newText' ] ) )
  return start_codepoint
def _LocationListToGoTo( request_data, positions ):
  """Convert a LSP list of locations to a ycmd GoTo response: a list when
  there are several locations, a single entry otherwise.
  Raises RuntimeError when the locations cannot be converted."""
  try:
    if len( positions ) > 1:
      return [ responses.BuildGoToResponseFromLocation(
                 *_PositionToLocationAndDescription( request_data, p ) )
               for p in positions ]
    location, line_value = _PositionToLocationAndDescription(
      request_data, positions[ 0 ] )
    return responses.BuildGoToResponseFromLocation( location, line_value )
  except ( IndexError, KeyError ):
    raise RuntimeError( 'Cannot jump to location' )
def _PositionToLocationAndDescription( request_data, position ):
  """Convert a LSP position to a ycmd location.

  Returns a tuple of ( ycmd Location, text of the line at that location ).
  Degrades gracefully: an unparsable URI yields an empty filename, and an
  unreadable file yields empty contents."""
  try:
    filename = lsp.UriToFilePath( position[ 'uri' ] )
    file_contents = GetFileLines( request_data, filename )
  except lsp.InvalidUriException:
    LOGGER.debug( 'Invalid URI, file contents not available in GoTo' )
    filename = ''
    file_contents = []
  except IOError:
    # It's possible to receive positions for files which no longer exist (due to
    # race condition). UriToFilePath doesn't throw IOError, so we can assume
    # that filename is already set.
    LOGGER.exception( 'A file could not be found when determining a '
                      'GoTo location' )
    file_contents = []
  return _BuildLocationAndDescription( filename,
                                       file_contents,
                                       position[ 'range' ][ 'start' ] )
def _LspToYcmdLocation( file_contents, location ):
  """Converts a LSP location to a ycmd one. Returns a tuple of (
    - the contents of the line of |location|
    - the line number of |location| (1-based, as ycmd expects)
    - the byte offset converted from the UTF-16 offset of |location|
  )"""
  line_num = location[ 'line' ] + 1
  try:
    line_value = file_contents[ location[ 'line' ] ]
    return line_value, line_num, utils.CodepointOffsetToByteOffset(
      line_value,
      lsp.UTF16CodeUnitsToCodepoints( line_value,
                                      location[ 'character' ] + 1 ) )
  except IndexError:
    # This can happen when there are stale diagnostics in OnFileReadyToParse,
    # just return the value as-is.
    return '', line_num, location[ 'character' ] + 1
def _CursorInsideLocation( request_data, location ):
  """Return True when the request's cursor lies within the LSP |location|
  (same file, and position within the location's range, inclusive)."""
  try:
    filepath = lsp.UriToFilePath( location[ 'uri' ] )
  except lsp.InvalidUriException:
    LOGGER.debug( 'Invalid URI, assume cursor is not inside the location' )
    return False
  if request_data[ 'filepath' ] != filepath:
    return False
  cursor = ( request_data[ 'line_num' ], request_data[ 'column_num' ] )
  file_contents = GetFileLines( request_data, filepath )
  lsp_range = location[ 'range' ]
  _, start_line, start_column = _LspToYcmdLocation( file_contents,
                                                    lsp_range[ 'start' ] )
  _, end_line, end_column = _LspToYcmdLocation( file_contents,
                                                lsp_range[ 'end' ] )
  # Tuple comparison gives us ( line, column ) lexicographic ordering.
  return ( ( start_line, start_column ) <= cursor <=
           ( end_line, end_column ) )
def _BuildLocationAndDescription( filename, file_contents, location ):
  """Return a tuple of ( ycmd Location, text of that line ) for the supplied
  filename and LSP location. Importantly, converts from the LSP UTF-16
  offset to a ycmd byte offset."""
  line_value, line_number, byte_column = _LspToYcmdLocation( file_contents,
                                                             location )
  ycmd_location = responses.Location( line_number,
                                      byte_column,
                                      filename = filename )
  return ycmd_location, line_value
def _BuildRange( contents, filename, r ):
  """Returns a ycmd range built from the LSP range |r|."""
  start, _ = _BuildLocationAndDescription( filename, contents, r[ 'start' ] )
  end, _ = _BuildLocationAndDescription( filename, contents, r[ 'end' ] )
  return responses.Range( start, end )
def _BuildDiagnostic( contents, uri, diag ):
  """Return a ycmd diagnostic from a LSP diagnostic."""
  try:
    filename = lsp.UriToFilePath( uri )
  except lsp.InvalidUriException:
    LOGGER.debug( 'Invalid URI received for diagnostic' )
    filename = ''
  diag_range = _BuildRange( contents, filename, diag[ 'range' ] )
  diag_text = diag[ 'message' ]
  # The 'code' field is optional; append it to the message when present.
  if 'code' in diag:
    diag_text += " [" + str( diag[ 'code' ] ) + "]"
  return responses.Diagnostic(
    ranges = [ diag_range ],
    location = diag_range.start_,
    location_extent = diag_range,
    text = diag_text,
    kind = lsp.SEVERITY[ diag[ 'severity' ] ].upper() )
def TextEditToChunks( request_data, uri, text_edit ):
  """Return a list of FixItChunks from a LSP textEdit."""
  try:
    filepath = lsp.UriToFilePath( uri )
  except lsp.InvalidUriException:
    LOGGER.debug( 'Invalid filepath received in TextEdit' )
    filepath = ''
  contents = GetFileLines( request_data, filepath )
  chunks = []
  for edit in text_edit:
    fixit_range = _BuildRange( contents, filepath, edit[ 'range' ] )
    chunks.append( responses.FixItChunk( edit[ 'newText' ], fixit_range ) )
  return chunks
def WorkspaceEditToFixIt( request_data, workspace_edit, text='' ):
  """Converts a LSP workspace edit to a ycmd FixIt suitable for passing to
  responses.BuildFixItResponse.

  Handles both WorkspaceEdit encodings: the legacy 'changes' map
  (uri -> edits) and the newer 'documentChanges' list. Returns None when
  |workspace_edit| is empty/falsy. The FixIt is anchored at the requesting
  cursor position."""
  if not workspace_edit:
    return None
  if 'changes' in workspace_edit:
    chunks = []
    # We sort the filenames to make the response stable. Edits are applied in
    # strict sequence within a file, but apply to files in arbitrary order.
    # However, it's important for the response to be stable for the tests.
    for uri in sorted( workspace_edit[ 'changes' ].keys() ):
      chunks.extend( TextEditToChunks( request_data,
                                       uri,
                                       workspace_edit[ 'changes' ][ uri ] ) )
  else:
    # 'documentChanges' variant: a list of TextDocumentEdit objects, each
    # carrying its own document URI and edit list.
    chunks = []
    for text_document_edit in workspace_edit[ 'documentChanges' ]:
      uri = text_document_edit[ 'textDocument' ][ 'uri' ]
      edits = text_document_edit[ 'edits' ]
      chunks.extend( TextEditToChunks( request_data, uri, edits ) )
  return responses.FixIt(
    responses.Location( request_data[ 'line_num' ],
                        request_data[ 'column_num' ],
                        request_data[ 'filepath' ] ),
    chunks,
    text )
class LanguageServerCompletionsCache( CompletionsCache ):
  """Cache of computed LSP completions for a particular request.

  Extends the base cache with LSP's isIncomplete semantics: when the server
  reports an incomplete list, the cache keys on the full query column instead
  of the start column, and cached results are only reused for prefix-extending
  queries. All public methods take self._access_lock (inherited) and call the
  base class's *NoLock variants."""

  def Invalidate( self ):
    with self._access_lock:
      super().InvalidateNoLock()
      self._is_incomplete = False
      self._use_start_column = True

  def Update( self, request_data, completions, is_incomplete ):
    with self._access_lock:
      super().UpdateNoLock( request_data, completions )
      self._is_incomplete = is_incomplete
      # An incomplete list is only valid for the exact column it was computed
      # at, so subsequent requests must use the full column codepoint.
      if is_incomplete:
        self._use_start_column = False

  def GetCodepointForCompletionRequest( self, request_data ):
    with self._access_lock:
      if self._use_start_column:
        return request_data[ 'start_codepoint' ]
      return request_data[ 'column_codepoint' ]

  # Must be called under the lock.
  def _IsQueryPrefix( self, request_data ):
    return request_data[ 'query' ].startswith( self._request_data[ 'query' ] )

  def GetCompletionsIfCacheValid( self, request_data ):
    with self._access_lock:
      if ( not self._is_incomplete and
           ( self._use_start_column or self._IsQueryPrefix( request_data ) ) ):
        return super().GetCompletionsIfCacheValidNoLock( request_data )
      return None
class RejectCollector:
  """workspace/applyEdit handler that refuses every edit request by replying
  with applied=False."""

  def CollectApplyEdit( self, request, connection ):
    connection.SendResponse( lsp.ApplyEditResponse( request, False ) )
class EditCollector:
  """workspace/applyEdit handler that records the requested edits (in
  self.requests) and acknowledges each one with applied=True."""

  def __init__( self ):
    # List of the 'params' payloads of every applyEdit request received.
    self.requests = []

  def CollectApplyEdit( self, request, connection ):
    self.requests.append( request[ 'params' ] )
    connection.SendResponse( lsp.ApplyEditResponse( request, True ) )
class WatchdogHandler( PatternMatchingEventHandler ):
  """Forwards watchdog filesystem events to the language server as
  workspace/didChangeWatchedFiles notifications.

  Notifications are only sent once the server is ready, and are serialised
  under the server's info mutex."""

  def __init__( self, server, patterns ):
    super().__init__( patterns )
    self._server = server

  def _Notify( self, path, change_type ):
    # Shared implementation for the three event hooks; |change_type| is one of
    # 'create', 'modify' or 'delete' as understood by DidChangeWatchedFiles.
    if self._server.ServerIsReady():
      with self._server._server_info_mutex:
        msg = lsp.DidChangeWatchedFiles( path, change_type )
        self._server.GetConnection().SendNotification( msg )

  def on_created( self, event ):
    self._Notify( event.src_path, 'create' )

  def on_modified( self, event ):
    self._Notify( event.src_path, 'modify' )

  def on_deleted( self, event ):
    self._Notify( event.src_path, 'delete' )
| 37.908631 | 80 | 0.664185 |
d4ef26f99cc51f711a57de6038e2a8fafe15200c | 16,346 | py | Python | salt/cloud/clouds/lxc.py | diego-treitos/salt | d2aec156ff2ef48ac21b4db211efb43220c6465c | [
"Apache-2.0"
] | 1 | 2020-10-19T11:49:49.000Z | 2020-10-19T11:49:49.000Z | salt/cloud/clouds/lxc.py | diego-treitos/salt | d2aec156ff2ef48ac21b4db211efb43220c6465c | [
"Apache-2.0"
] | null | null | null | salt/cloud/clouds/lxc.py | diego-treitos/salt | d2aec156ff2ef48ac21b4db211efb43220c6465c | [
"Apache-2.0"
] | 1 | 2020-10-19T11:49:50.000Z | 2020-10-19T11:49:50.000Z | # -*- coding: utf-8 -*-
'''
Install Salt on an LXC Container
================================
.. versionadded:: 2014.7.0
Please read :ref:`core config documentation <config_lxc>`.
'''
# Import python libs
from __future__ import absolute_import
import json
import os
import logging
import copy
import time
from pprint import pformat
# Import salt libs
import salt.utils
# Import salt cloud libs
import salt.utils.cloud
import salt.config as config
from salt.exceptions import SaltCloudSystemExit
import salt.client
import salt.runner
import salt.syspaths
# Import 3rd-party libs
import salt.ext.six as six
# Get logging started
log = logging.getLogger(__name__)
__FUN_TIMEOUT = {
'cmd.run': 60 * 60,
'test.ping': 10,
'lxc.info': 40,
'lxc.list': 300,
'lxc.templates': 100,
'grains.items': 100,
}
__CACHED_CALLS = {}
__CACHED_FUNS = {
'test.ping': 3 * 60, # cache ping for 3 minutes
'lxc.list': 2 # cache lxc.list for 2 seconds
}
def __virtual__():
    '''
    Needs no special configuration

    Always loads this cloud driver; suitability is checked later by
    get_configured_provider().
    '''
    return True
def _get_grain_id(id_):
    '''Build a grain key namespaced under the configured provider target.

    Returns None (implicitly) when no provider is configured.
    '''
    if not get_configured_provider():
        return
    infos = get_configured_provider()
    return 'salt.cloud.lxc.{0}.{1}'.format(infos['target'], id_)
def _minion_opts(cfg='minion'):
    '''Load the minion configuration.

    The path is taken from SALT_MINION_CONFIG when set, otherwise derived
    from the master's conf_file directory (or the default config dir).
    '''
    if 'conf_file' in __opts__:
        default_dir = os.path.dirname(__opts__['conf_file'])
    else:
        # BUG FIX: the original had a trailing comma here, which made
        # default_dir a 1-tuple and broke os.path.join() below.
        default_dir = salt.syspaths.CONFIG_DIR
    cfg = os.environ.get(
        'SALT_MINION_CONFIG', os.path.join(default_dir, cfg))
    opts = config.minion_config(cfg)
    return opts
def _master_opts(cfg='master'):
    '''Load the master configuration, honouring SALT_MASTER_CONFIG.'''
    default_cfg = __opts__.get(
        'conf_file', os.path.join(salt.syspaths.CONFIG_DIR, cfg))
    cfg_path = os.environ.get('SALT_MASTER_CONFIG', default_cfg)
    return config.master_config(cfg_path)
def _client():
    '''Return a salt LocalClient bound to the master configuration.'''
    return salt.client.get_local_client(mopts=_master_opts())
def _runner():
    '''Return a RunnerClient bound to the master configuration.'''
    # opts = _master_opts()
    # opts['output'] = 'quiet'
    return salt.runner.RunnerClient(_master_opts())
def _salt(fun, *args, **kw):
    '''Execute a salt function on a specific minion

    Dispatches |fun| asynchronously through a LocalClient, polls
    saltutil.find_job until completion, then fetches the return via the
    jobs.lookup_jid runner. Results of functions listed in __CACHED_FUNS are
    memoised in __CACHED_CALLS for their configured time slice.

    Special kwargs:

        salt_target
            target to exec things on
        salt_timeout
            timeout for jobs
        salt_job_poll
            poll interval to wait for job finish result
    '''
    try:
        poll = kw.pop('salt_job_poll')
    except KeyError:
        poll = 0.1
    try:
        target = kw.pop('salt_target')
    except KeyError:
        target = None
    try:
        timeout = int(kw.pop('salt_timeout'))
    except (KeyError, ValueError):
        # try to has some low timeouts for very basic commands
        timeout = __FUN_TIMEOUT.get(
            fun,
            900  # wait up to 15 minutes for the default timeout
        )
    try:
        kwargs = kw.pop('kwargs')
    except KeyError:
        kwargs = {}
    if not target:
        # fall back to the configured provider's target minion
        infos = get_configured_provider()
        if not infos:
            return
        target = infos['target']
    laps = time.time()
    cache = False
    if fun in __CACHED_FUNS:
        cache = True
        # quantise the timestamp so the cache entry lives for the
        # per-function number of seconds configured in __CACHED_FUNS
        laps = laps // __CACHED_FUNS[fun]
    try:
        sargs = json.dumps(args)
    except TypeError:
        sargs = ''
    try:
        skw = json.dumps(kw)
    except TypeError:
        skw = ''
    try:
        skwargs = json.dumps(kwargs)
    except TypeError:
        skwargs = ''
    cache_key = (laps, target, fun, sargs, skw, skwargs)
    if not cache or (cache and (cache_key not in __CACHED_CALLS)):
        conn = _client()
        runner = _runner()
        rkwargs = kwargs.copy()
        rkwargs['timeout'] = timeout
        rkwargs.setdefault('expr_form', 'list')
        kwargs.setdefault('expr_form', 'list')
        ping_retries = 0
        # the target(s) have environ one minute to respond
        # we call 60 ping request, this prevent us
        # from blindly send commands to unmatched minions
        ping_max_retries = 60
        ping = True
        # do not check ping... if we are pinguing
        if fun == 'test.ping':
            ping_retries = ping_max_retries + 1
        # be sure that the executors are alive
        while ping_retries <= ping_max_retries:
            try:
                if ping_retries > 0:
                    time.sleep(1)
                pings = conn.cmd(tgt=target,
                                 timeout=10,
                                 fun='test.ping')
                values = list(pings.values())
                if not values:
                    ping = False
                for v in values:
                    if v is not True:
                        ping = False
                if not ping:
                    raise ValueError('Unreachable')
                break
            except Exception:
                ping = False
                ping_retries += 1
                log.error('{0} unreachable, retrying'.format(target))
        if not ping:
            raise SaltCloudSystemExit('Target {0} unreachable'.format(target))
        jid = conn.cmd_async(tgt=target,
                             fun=fun,
                             arg=args,
                             kwarg=kw,
                             **rkwargs)
        cret = conn.cmd(tgt=target,
                        fun='saltutil.find_job',
                        arg=[jid],
                        timeout=10,
                        **kwargs)
        running = bool(cret.get(target, False))
        endto = time.time() + timeout
        # poll until the job disappears from the minion's active job list
        while running:
            rkwargs = {
                'tgt': target,
                'fun': 'saltutil.find_job',
                'arg': [jid],
                'timeout': 10
            }
            cret = conn.cmd(**rkwargs)
            running = bool(cret.get(target, False))
            if not running:
                break
            if running and (time.time() > endto):
                raise Exception('Timeout {0}s for {1} is elapsed'.format(
                    timeout, pformat(rkwargs)))
            time.sleep(poll)
        # timeout for the master to return data about a specific job
        wait_for_res = float({
            'test.ping': '5',
        }.get(fun, '120'))
        while wait_for_res:
            wait_for_res -= 0.5
            cret = runner.cmd(
                'jobs.lookup_jid',
                [jid, {'__kwarg__': True}])
            if target in cret:
                ret = cret[target]
                break
            # recent changes
            elif 'data' in cret and 'outputter' in cret:
                ret = cret['data']
                break
            # special case, some answers may be crafted
            # to handle the unresponsivness of a specific command
            # which is also meaningful, e.g. a minion not yet provisioned
            if fun in ['test.ping'] and not wait_for_res:
                ret = {
                    'test.ping': False,
                }.get(fun, False)
            time.sleep(0.5)
        try:
            if 'is not available.' in ret:
                raise SaltCloudSystemExit(
                    'module/function {0} is not available'.format(fun))
        except SaltCloudSystemExit:
            raise
        except TypeError:
            # ret is not a string; nothing to check
            pass
        if cache:
            __CACHED_CALLS[cache_key] = ret
    elif cache and cache_key in __CACHED_CALLS:
        ret = __CACHED_CALLS[cache_key]
    return ret
def avail_images():
    '''Return the LXC templates available on the target minion.'''
    return _salt('lxc.templates')
def list_nodes(conn=None, call=None):
    '''List containers known to the configured provider target.

    The effective |call| mode ('full'/'action') is derived from the CLI
    options in __opts__; in creation mode (profile + names, no destroy) the
    matching containers are hidden so salt-cloud proceeds to create().
    '''
    hide = False
    names = __opts__.get('names', [])
    profile = __opts__.get('profile', [])
    destroy_opt = __opts__.get('destroy', False)
    action = __opts__.get('action', '')
    for opt in ['full_query', 'select_query', 'query']:
        if __opts__.get(opt, False):
            call = 'full'
    if destroy_opt:
        call = 'full'
    if action and not call:
        call = 'action'
    if profile and names and not destroy_opt:
        hide = True
    if not get_configured_provider():
        return
    lxclist = _salt('lxc.list', extra=True)
    nodes = {}
    for state, lxcs in six.iteritems(lxclist):
        for lxcc, linfos in six.iteritems(lxcs):
            info = {
                'id': lxcc,
                'image': None,
                'size': linfos['size'],
                'state': state.lower(),
                'public_ips': linfos['public_ips'],
                'private_ips': linfos['private_ips'],
            }
            # in creation mode, we need to go inside the create method
            # so we hide the running vm from being seen as already installed
            # do not also mask half configured nodes which are explicitly asked
            # to be acted on, on the command line
            if (
                (call in ['full'] or not hide)
                and (
                    (lxcc in names and call in ['action'])
                    or (call in ['full'])
                )
            ):
                nodes[lxcc] = info
    return nodes
def list_nodes_full(conn=None, call=None):
    '''Full listing: delegates to list_nodes with call defaulting to
    'action'.'''
    if not get_configured_provider():
        return
    return list_nodes(conn=conn, call=call or 'action')
def show_instance(name, call=None):
    '''
    Show the details from the provider concerning an instance

    Also refreshes the salt-cloud node cache entry for |name|.
    Raises KeyError if the container is unknown to list_nodes_full().
    '''
    if not get_configured_provider():
        return
    if not call:
        call = 'action'
    nodes = list_nodes_full(call=call)
    salt.utils.cloud.cache_node(nodes[name], __active_provider_name__, __opts__)
    return nodes[name]
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields

    Field selection comes from the 'query.selection' option, defaulting to
    the standard node keys below.
    '''
    if not call:
        call = 'select'
    if not get_configured_provider():
        return
    info = ['id', 'image', 'size', 'state', 'public_ips', 'private_ips']
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full(call='action'),
        __opts__.get('query.selection', info), call)
def _checkpoint(ret):
    '''Render a state-style return dict as text, log it, and return it.

    Raises SaltCloudSystemExit (with the rendered text, minus 'changes')
    when ret['result'] is falsy.
    '''
    sret = '''
id: {name}
last message: {comment}'''.format(**ret)
    # sorted() gives stable output; .get() tolerates a missing 'changes'
    # key (the original crashed with KeyError in that case even though the
    # failure path below explicitly guarded for its absence).
    for ch, comment in sorted(ret.get('changes', {}).items()):
        sret += (
            '\n'
            ' {0}:\n'
            ' {1}'
        ).format(ch, comment.replace(
            '\n',
            '\n'
            ' '))
    if not ret['result']:
        # the changes are already rendered into sret; drop the raw data
        ret.pop('changes', None)
        raise SaltCloudSystemExit(sret)
    log.info(sret)
    return sret
def destroy(vm_, call=None):
    '''Destroy a lxc container

    Requires the CLI destroy flag (or action == 'destroy'); fires the
    destroying/destroyed cloud events around the actual lxc.destroy call and
    prunes the minion cache entry when update_cachedir is enabled.
    '''
    destroy_opt = __opts__.get('destroy', False)
    action = __opts__.get('action', '')
    if action != 'destroy' and not destroy_opt:
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    if not get_configured_provider():
        return
    ret = {'comment': '{0} was not found'.format(vm_),
           'result': False}
    if _salt('lxc.info', vm_):
        salt.utils.cloud.fire_event(
            'event',
            'destroying instance',
            'salt/cloud/{0}/destroying'.format(vm_),
            {'name': vm_, 'instance_id': vm_},
            transport=__opts__['transport']
        )
        # stop=True forces the container down before removal
        cret = _salt('lxc.destroy', vm_, stop=True)
        ret['result'] = cret['result']
        if ret['result']:
            ret['comment'] = '{0} was destroyed'.format(vm_)
            salt.utils.cloud.fire_event(
                'event',
                'destroyed instance',
                'salt/cloud/{0}/destroyed'.format(vm_),
                {'name': vm_, 'instance_id': vm_},
                transport=__opts__['transport']
            )
            if __opts__.get('update_cachedir', False) is True:
                salt.utils.cloud.delete_minion_cachedir(vm_, __active_provider_name__.split(':')[0], __opts__)
    return ret
def create(vm_, call=None):
    '''Create an lxc Container.
    This function is idempotent and will try to either provision
    or finish the provision of an lxc container.

    NOTE: Most of the initialization code has been moved and merged
    with the lxc runner and lxc.init functions
    '''
    # NOTE(review): __grains__ is assigned but never read below — presumably
    # kept for its side effect of verifying the target answers; confirm.
    __grains__ = _salt('grains.items')
    prov = get_configured_provider(vm_)
    if not prov:
        return
    profile = vm_.get('profile', None)
    if not profile:
        profile = {}
    salt.utils.cloud.fire_event(
        'event', 'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {'name': vm_['name'], 'profile': vm_['profile'],
         'provider': vm_['provider'], },
        transport=__opts__['transport'])
    ret = {'name': vm_['name'], 'changes': {}, 'result': True, 'comment': ''}
    if 'pub_key' not in vm_ and 'priv_key' not in vm_:
        log.debug('Generating minion keys for {0}'.format(vm_['name']))
        vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
            salt.config.get_cloud_config_value(
                'keysize', vm_, __opts__))
    # get the minion key pair to distribute back to the container
    kwarg = copy.deepcopy(vm_)
    kwarg['host'] = prov['target']
    cret = _runner().cmd('lxc.cloud_init', [vm_['name']], kwarg=kwarg)
    ret['runner_return'] = cret
    ret['result'] = cret['result']
    if not ret['result']:
        ret['Error'] = 'Error while creating {0},'.format(vm_['name'])
    else:
        ret['changes']['created'] = 'created'
    # When using cloud states to manage LXC containers
    # __opts__['profile'] is not implicitly reset between operations
    # on different containers. However list_nodes will hide container
    # if profile is set in opts assuming that it have to be created.
    # But in cloud state we do want to check at first if it really
    # exists hence the need to remove profile from global opts once
    # current container is created.
    if 'profile' in __opts__:
        del __opts__['profile']
    return ret
def get_provider(name):
    '''Return the 'lxc' section of provider |name|, or None when the
    provider is unknown or not an lxc provider.'''
    data = None
    if name in __opts__['providers']:
        data = __opts__['providers'][name]
        if 'lxc' in data:
            data = data['lxc']
        else:
            data = None
    return data
def get_configured_provider(vm_=None):
    '''
    Return the contextual provider of None if no configured
    one can be found.

    Resolution order: --list-images provider, explicitly named providers,
    the provider linked from the active profile, then the active provider
    itself. Whatever is found is ping-checked before being returned;
    returns False when nothing matches.
    '''
    if vm_ is None:
        vm_ = {}
    dalias, driver = __active_provider_name__.split(':')
    data = None
    tgt = 'unknown'
    img_provider = __opts__.get('list_images', '')
    arg_providers = __opts__.get('names', [])
    matched = False
    # --list-images level
    if img_provider:
        tgt = 'provider: {0}'.format(img_provider)
        if dalias == img_provider:
            data = get_provider(img_provider)
            matched = True
    # providers are set in configuration
    if not data and 'profile' not in __opts__ and arg_providers:
        for name in arg_providers:
            tgt = 'provider: {0}'.format(name)
            if dalias == name:
                data = get_provider(name)
                if data:
                    matched = True
                    break
    # -p is providen, get the uplinked provider
    elif 'profile' in __opts__:
        curprof = __opts__['profile']
        profs = __opts__['profiles']
        tgt = 'profile: {0}'.format(curprof)
        if (
            curprof in profs
            and profs[curprof]['provider'] == __active_provider_name__
        ):
            prov, cdriver = profs[curprof]['provider'].split(':')
            tgt += ' provider: {0}'.format(prov)
            data = get_provider(prov)
            matched = True
    # fallback if we have only __active_provider_name__
    if ((__opts__.get('destroy', False) and not data)
            or (not matched and __active_provider_name__)):
        data = __opts__.get('providers',
                            {}).get(dalias, {}).get(driver, {})
    # in all cases, verify that the linked saltmaster is alive.
    if data:
        try:
            ret = _salt('test.ping', salt_target=data['target'])
            if not ret:
                raise Exception('error')
            return data
        except Exception:
            raise SaltCloudSystemExit(
                'Configured provider {0} minion: {1} is unreachable'.format(
                    __active_provider_name__, data['target']))
    return False
| 31.314176 | 110 | 0.554448 |
0d25938bccb296369575cb185a9f55ac3e95f127 | 480 | py | Python | pyifc/compress/__init__.py | tbrus/pyifc | 8e93c6e9f26151de8629300e45d3005e19eef3a3 | [
"MIT"
] | 1 | 2022-03-07T16:19:02.000Z | 2022-03-07T16:19:02.000Z | pyifc/compress/__init__.py | tbrus/pyifc | 8e93c6e9f26151de8629300e45d3005e19eef3a3 | [
"MIT"
] | null | null | null | pyifc/compress/__init__.py | tbrus/pyifc | 8e93c6e9f26151de8629300e45d3005e19eef3a3 | [
"MIT"
] | null | null | null | """
pyifc.compress
--------------
Compress and pack .ifc files.
"""
from pyifc.compress._compress import compress
from pyifc.compress._exceptions import FileExtensionError
from pyifc.compress._pack import compress_and_tar, compress_and_zip
from pyifc.compress._validators import existence_validator, extension_validator
__all__ = [
"compress",
"compress_and_tar",
"compress_and_zip",
"FileExtensionError",
"extension_validator",
"existence_validator",
]
| 24 | 79 | 0.75625 |
5466f2e802b3d5f9ed2819b4ebbf93a886fdbb25 | 569 | py | Python | ci/gen_sources.py | christopherosthues/recompression | 1932cc5aa77a533d9994dbe0c80dbb889a4d25ec | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2019-07-18T12:48:14.000Z | 2022-01-04T13:54:13.000Z | ci/gen_sources.py | christopherosthues/recompression | 1932cc5aa77a533d9994dbe0c80dbb889a4d25ec | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | ci/gen_sources.py | christopherosthues/recompression | 1932cc5aa77a533d9994dbe0c80dbb889a4d25ec | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import os
import os.path
# Mirror every .hpp under the current tree as a .cpp stub (containing just the
# matching #include) under ../src/, preserving the directory layout.
for root, sub_dirs, files in os.walk("."):
    for file in files:
        if file.endswith(".hpp"):
            # root == "." has no "./" prefix to strip; other roots do
            r = root[1:] if root == "." else root[2:]
            os.makedirs("../src/" + r + "/", exist_ok=True)
            source_path = "../src/" + r + "/" + file[:len(file) - 3] + "cpp"
            # context manager ensures the handle is closed even on write errors
            # (the original leaked the handle if write() raised)
            with open(source_path, 'w') as source_file:
                if root[2:] == "":
                    source_file.write('#include "' + file + '"\n')
                else:
                    source_file.write('#include "' + root[2:] + "/" + file + '"\n')
c3eafd1a4dd81bdae26045b9c16378fb3d2770c4 | 580 | py | Python | random_python_stuff/fizz_buzz.py | austinlzno/cssi_project | 2fc875c7c8811f136984b40542c8f1dff41a189f | [
"Apache-2.0"
] | null | null | null | random_python_stuff/fizz_buzz.py | austinlzno/cssi_project | 2fc875c7c8811f136984b40542c8f1dff41a189f | [
"Apache-2.0"
] | null | null | null | random_python_stuff/fizz_buzz.py | austinlzno/cssi_project | 2fc875c7c8811f136984b40542c8f1dff41a189f | [
"Apache-2.0"
] | null | null | null | """My implementation of fizzbuzz."""
import demo_main
def fizzbuzz(number):
    """Print 'fizz', 'buzz' or 'fizzbuzz' for the given number.

    Prints nothing when number is divisible by neither 3 nor 5. The elif
    chain replaces three independent modulo tests with identical output.
    """
    if number % 15 == 0:
        # divisible by both 3 and 5
        print('fizzbuzz')
    elif number % 3 == 0:
        print('fizz')
    elif number % 5 == 0:
        print('buzz')
# Demo call executed on import as well as when run as a script.
fizzbuzz(10)
"""
def grocery_list(dairy_name, produce_name, meat_name):
 return '{dairy_name}, {produce_name}, {meat_name}'
grocery_list('milk' , 'apples' , 'steak')
"""
if __name__ == '__main__':
    fizzbuzz(15)
    # demo_main is a sibling module; its helper just logs the given string
    # (NOTE(review): behavior of my_funky_function not visible here — confirm)
    demo_main.my_funky_function('file transfer')
| 20.714286 | 54 | 0.582759 |
66569a96ee55584dd8ebc707f37d7a75858637b2 | 17,456 | py | Python | python/callingconvention.py | carsonharmon/binaryninja-api | f7ad332ad69d370aa29cd54f4c7307da4d9173e2 | [
"MIT"
] | null | null | null | python/callingconvention.py | carsonharmon/binaryninja-api | f7ad332ad69d370aa29cd54f4c7307da4d9173e2 | [
"MIT"
] | null | null | null | python/callingconvention.py | carsonharmon/binaryninja-api | f7ad332ad69d370aa29cd54f4c7307da4d9173e2 | [
"MIT"
] | null | null | null | # Copyright (c) 2015-2020 Vector 35 Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import traceback
import ctypes
# Binary Ninja components
import binaryninja
from binaryninja import _binaryninjacore as core
from binaryninja import log
from binaryninja.enums import VariableSourceType
# 2-3 compatibility
from binaryninja import range
class CallingConvention(object):
name = None
caller_saved_regs = []
callee_saved_regs = []
int_arg_regs = []
float_arg_regs = []
arg_regs_share_index = False
stack_reserved_for_arg_regs = False
stack_adjusted_on_return = False
int_return_reg = None
high_int_return_reg = None
float_return_reg = None
global_pointer_reg = None
implicitly_defined_regs = []
_registered_calling_conventions = []
    def __init__(self, arch=None, name=None, handle=None, confidence=binaryninja.types.max_confidence):
        """Create or wrap a calling convention.

        Two modes:
          * handle is None: register a new custom convention named |name| for
            |arch| with the core, wiring every FFI callback to the _get_* /
            _free_* methods below. Both arch and name are then required.
          * handle given: wrap an existing core convention and eagerly copy
            its register lists and return/pointer registers into __dict__
            (shadowing the class-level defaults).

        :param arch: Architecture the convention belongs to (custom mode)
        :param name: convention name (custom mode)
        :param handle: existing BNCallingConvention handle (wrap mode)
        :param confidence: confidence value attached to this convention
        :raises ValueError: when neither handle nor (arch and name) are given
        """
        if handle is None:
            if arch is None or name is None:
                self.handle = None
                raise ValueError("Must specify either handle or architecture and name")
            # NOTE(review): custom mode sets self._arch while wrap mode sets
            # self.arch — callbacks below read self.arch; confirm intended.
            self._arch = arch
            # maps buffer address -> (c_void_p, buffer) to keep register lists
            # alive until the core calls freeRegisterList
            self._pending_reg_lists = {}
            self._cb = core.BNCustomCallingConvention()
            self._cb.context = 0
            # Re-wrap each bound method in the exact ctypes function-pointer
            # type the core expects (taken from the struct field's class).
            self._cb.getCallerSavedRegisters = self._cb.getCallerSavedRegisters.__class__(self._get_caller_saved_regs)
            self._cb.getCalleeSavedRegisters = self._cb.getCalleeSavedRegisters.__class__(self._get_callee_saved_regs)
            self._cb.getIntegerArgumentRegisters = self._cb.getIntegerArgumentRegisters.__class__(self._get_int_arg_regs)
            self._cb.getFloatArgumentRegisters = self._cb.getFloatArgumentRegisters.__class__(self._get_float_arg_regs)
            self._cb.freeRegisterList = self._cb.freeRegisterList.__class__(self._free_register_list)
            self._cb.areArgumentRegistersSharedIndex = self._cb.areArgumentRegistersSharedIndex.__class__(self._arg_regs_share_index)
            self._cb.isStackReservedForArgumentRegisters = self._cb.isStackReservedForArgumentRegisters.__class__(self._stack_reserved_for_arg_regs)
            self._cb.isStackAdjustedOnReturn = self._cb.isStackAdjustedOnReturn.__class__(self._stack_adjusted_on_return)
            self._cb.getIntegerReturnValueRegister = self._cb.getIntegerReturnValueRegister.__class__(self._get_int_return_reg)
            self._cb.getHighIntegerReturnValueRegister = self._cb.getHighIntegerReturnValueRegister.__class__(self._get_high_int_return_reg)
            self._cb.getFloatReturnValueRegister = self._cb.getFloatReturnValueRegister.__class__(self._get_float_return_reg)
            self._cb.getGlobalPointerRegister = self._cb.getGlobalPointerRegister.__class__(self._get_global_pointer_reg)
            self._cb.getImplicitlyDefinedRegisters = self._cb.getImplicitlyDefinedRegisters.__class__(self._get_implicitly_defined_regs)
            self._cb.getIncomingRegisterValue = self._cb.getIncomingRegisterValue.__class__(self._get_incoming_reg_value)
            self._cb.getIncomingFlagValue = self._cb.getIncomingFlagValue.__class__(self._get_incoming_flag_value)
            self._cb.getIncomingVariableForParameterVariable = self._cb.getIncomingVariableForParameterVariable.__class__(self._get_incoming_var_for_parameter_var)
            self._cb.getParameterVariableForIncomingVariable = self._cb.getParameterVariableForIncomingVariable.__class__(self._get_parameter_var_for_incoming_var)
            self.handle = core.BNCreateCallingConvention(arch.handle, name, self._cb)
            # keep a reference so the callback closures outlive this scope
            self.__class__._registered_calling_conventions.append(self)
        else:
            self.handle = handle
            self.arch = binaryninja.architecture.CoreArchitecture._from_cache(core.BNGetCallingConventionArchitecture(self.handle))
            self.__dict__["name"] = core.BNGetCallingConventionName(self.handle)
            self.__dict__["arg_regs_share_index"] = core.BNAreArgumentRegistersSharedIndex(self.handle)
            self.__dict__["stack_reserved_for_arg_regs"] = core.BNIsStackReservedForArgumentRegisters(self.handle)
            self.__dict__["stack_adjusted_on_return"] = core.BNIsStackAdjustedOnReturn(self.handle)
            # Each list below: fetch from core, translate indices to names,
            # then free the core-allocated array.
            count = ctypes.c_ulonglong()
            regs = core.BNGetCallerSavedRegisters(self.handle, count)
            result = []
            arch = self.arch
            for i in range(0, count.value):
                result.append(arch.get_reg_name(regs[i]))
            core.BNFreeRegisterList(regs, count.value)
            self.__dict__["caller_saved_regs"] = result
            count = ctypes.c_ulonglong()
            regs = core.BNGetCalleeSavedRegisters(self.handle, count)
            result = []
            arch = self.arch
            for i in range(0, count.value):
                result.append(arch.get_reg_name(regs[i]))
            core.BNFreeRegisterList(regs, count.value)
            self.__dict__["callee_saved_regs"] = result
            count = ctypes.c_ulonglong()
            regs = core.BNGetIntegerArgumentRegisters(self.handle, count)
            result = []
            arch = self.arch
            for i in range(0, count.value):
                result.append(arch.get_reg_name(regs[i]))
            core.BNFreeRegisterList(regs, count.value)
            self.__dict__["int_arg_regs"] = result
            count = ctypes.c_ulonglong()
            regs = core.BNGetFloatArgumentRegisters(self.handle, count)
            result = []
            arch = self.arch
            for i in range(0, count.value):
                result.append(arch.get_reg_name(regs[i]))
            core.BNFreeRegisterList(regs, count.value)
            self.__dict__["float_arg_regs"] = result
            # 0xffffffff is the core's sentinel for "no such register"
            reg = core.BNGetIntegerReturnValueRegister(self.handle)
            if reg == 0xffffffff:
                self.__dict__["int_return_reg"] = None
            else:
                self.__dict__["int_return_reg"] = self.arch.get_reg_name(reg)
            reg = core.BNGetHighIntegerReturnValueRegister(self.handle)
            if reg == 0xffffffff:
                self.__dict__["high_int_return_reg"] = None
            else:
                self.__dict__["high_int_return_reg"] = self.arch.get_reg_name(reg)
            reg = core.BNGetFloatReturnValueRegister(self.handle)
            if reg == 0xffffffff:
                self.__dict__["float_return_reg"] = None
            else:
                self.__dict__["float_return_reg"] = self.arch.get_reg_name(reg)
            reg = core.BNGetGlobalPointerRegister(self.handle)
            if reg == 0xffffffff:
                self.__dict__["global_pointer_reg"] = None
            else:
                self.__dict__["global_pointer_reg"] = self.arch.get_reg_name(reg)
            count = ctypes.c_ulonglong()
            regs = core.BNGetImplicitlyDefinedRegisters(self.handle, count)
            result = []
            arch = self.arch
            for i in range(0, count.value):
                result.append(arch.get_reg_name(regs[i]))
            core.BNFreeRegisterList(regs, count.value)
            self.__dict__["implicitly_defined_regs"] = result
        self.confidence = confidence
    def __del__(self):
        # Release the core-side convention; handle is None when __init__
        # raised before creating one.
        if self.handle is not None:
            core.BNFreeCallingConvention(self.handle)
    def __repr__(self):
        # e.g. "<calling convention: x86 cdecl>"
        return "<calling convention: %s %s>" % (self.arch.name, self.name)
    def __str__(self):
        # The convention name alone is the user-facing string form.
        return self.name
    def __eq__(self, other):
        # Identity of the underlying core object, not structural equality.
        if not isinstance(other, self.__class__):
            return NotImplemented
        return ctypes.addressof(self.handle.contents) == ctypes.addressof(other.handle.contents)
    def __ne__(self, other):
        # Mirror of __eq__, propagating NotImplemented for foreign types.
        if not isinstance(other, self.__class__):
            return NotImplemented
        return not (self == other)
    def __hash__(self):
        # Hash on the core object's address, consistent with __eq__.
        return hash(ctypes.addressof(self.handle.contents))
    def _get_caller_saved_regs(self, ctxt, count):
        """FFI callback: write the caller-saved register indices into a
        C array and return its address; *count receives the length.

        The array is kept alive in _pending_reg_lists until the core calls
        freeRegisterList. On error, returns None with *count = 0."""
        try:
            regs = self.__class__.caller_saved_regs
            count[0] = len(regs)
            reg_buf = (ctypes.c_uint * len(regs))()
            for i in range(0, len(regs)):
                reg_buf[i] = self.arch.regs[regs[i]].index
            result = ctypes.cast(reg_buf, ctypes.c_void_p)
            self._pending_reg_lists[result.value] = (result, reg_buf)
            return result.value
        except:
            log.log_error(traceback.format_exc())
            count[0] = 0
            return None
    def _get_callee_saved_regs(self, ctxt, count):
        """FFI callback: as _get_caller_saved_regs, for the callee-saved
        register list."""
        try:
            regs = self.__class__.callee_saved_regs
            count[0] = len(regs)
            reg_buf = (ctypes.c_uint * len(regs))()
            for i in range(0, len(regs)):
                reg_buf[i] = self.arch.regs[regs[i]].index
            result = ctypes.cast(reg_buf, ctypes.c_void_p)
            self._pending_reg_lists[result.value] = (result, reg_buf)
            return result.value
        except:
            log.log_error(traceback.format_exc())
            count[0] = 0
            return None
    def _get_int_arg_regs(self, ctxt, count):
        """FFI callback: as _get_caller_saved_regs, for the integer argument
        register list."""
        try:
            regs = self.__class__.int_arg_regs
            count[0] = len(regs)
            reg_buf = (ctypes.c_uint * len(regs))()
            for i in range(0, len(regs)):
                reg_buf[i] = self.arch.regs[regs[i]].index
            result = ctypes.cast(reg_buf, ctypes.c_void_p)
            self._pending_reg_lists[result.value] = (result, reg_buf)
            return result.value
        except:
            log.log_error(traceback.format_exc())
            count[0] = 0
            return None
    def _get_float_arg_regs(self, ctxt, count):
        """FFI callback: as _get_caller_saved_regs, for the floating-point
        argument register list."""
        try:
            regs = self.__class__.float_arg_regs
            count[0] = len(regs)
            reg_buf = (ctypes.c_uint * len(regs))()
            for i in range(0, len(regs)):
                reg_buf[i] = self.arch.regs[regs[i]].index
            result = ctypes.cast(reg_buf, ctypes.c_void_p)
            self._pending_reg_lists[result.value] = (result, reg_buf)
            return result.value
        except:
            log.log_error(traceback.format_exc())
            count[0] = 0
            return None
    def _free_register_list(self, ctxt, regs):
        """FFI callback: release a register array previously handed to the
        core, by dropping our keep-alive entry from _pending_reg_lists."""
        try:
            buf = ctypes.cast(regs, ctypes.c_void_p)
            if buf.value not in self._pending_reg_lists:
                raise ValueError("freeing register list that wasn't allocated")
            del self._pending_reg_lists[buf.value]
        except:
            log.log_error(traceback.format_exc())
    def _arg_regs_share_index(self, ctxt):
        """FFI callback: whether int and float argument registers share one
        index sequence; False on internal error."""
        try:
            return self.__class__.arg_regs_share_index
        except:
            log.log_error(traceback.format_exc())
            return False
    def _stack_reserved_for_arg_regs(self, ctxt):
        """FFI callback: whether stack space is reserved for register
        arguments; False on internal error."""
        try:
            return self.__class__.stack_reserved_for_arg_regs
        except:
            log.log_error(traceback.format_exc())
            return False
    def _stack_adjusted_on_return(self, ctxt):
        """FFI callback: whether the callee adjusts the stack on return;
        False on internal error."""
        try:
            return self.__class__.stack_adjusted_on_return
        except:
            log.log_error(traceback.format_exc())
            return False
    def _get_int_return_reg(self, ctxt):
        """FFI callback: index of the integer return value register.

        NOTE(review): unlike the high/float variants there is no None check
        here, and the error path returns False instead of a sentinel index —
        presumably int_return_reg is considered mandatory; confirm."""
        try:
            return self.arch.regs[self.__class__.int_return_reg].index
        except:
            log.log_error(traceback.format_exc())
            return False
    def _get_high_int_return_reg(self, ctxt):
        """FFI callback: index of the high half of the integer return value
        register, or 0xffffffff when the convention defines none."""
        try:
            if self.__class__.high_int_return_reg is None:
                return 0xffffffff
            return self.arch.regs[self.__class__.high_int_return_reg].index
        except:
            log.log_error(traceback.format_exc())
            return False
def _get_float_return_reg(self, ctxt):
try:
if self.__class__.float_return_reg is None:
return 0xffffffff
return self.arch.regs[self.__class__.float_int_return_reg].index
except:
log.log_error(traceback.format_exc())
return False
    def _get_global_pointer_reg(self, ctxt):
        """FFI callback: index of the global pointer register, or 0xffffffff
        when the convention defines none."""
        try:
            if self.__class__.global_pointer_reg is None:
                return 0xffffffff
            return self.arch.regs[self.__class__.global_pointer_reg].index
        except:
            log.log_error(traceback.format_exc())
            return False
    def _get_implicitly_defined_regs(self, ctxt, count):
        """FFI callback: as _get_caller_saved_regs, for the implicitly
        defined register list."""
        try:
            regs = self.__class__.implicitly_defined_regs
            count[0] = len(regs)
            reg_buf = (ctypes.c_uint * len(regs))()
            for i in range(0, len(regs)):
                reg_buf[i] = self.arch.regs[regs[i]].index
            result = ctypes.cast(reg_buf, ctypes.c_void_p)
            self._pending_reg_lists[result.value] = (result, reg_buf)
            return result.value
        except:
            log.log_error(traceback.format_exc())
            count[0] = 0
            return None
    def _get_incoming_reg_value(self, ctxt, reg, func, result):
        """FFI callback: compute the incoming value of register |reg| at the
        start of |func| and write its state/value into *result.

        Delegates to perform_get_incoming_reg_value; falls back to an
        undetermined RegisterValue on any error."""
        try:
            func_obj = binaryninja.function.Function(handle = core.BNNewFunctionReference(func))
            reg_name = self.arch.get_reg_name(reg)
            api_obj = self.perform_get_incoming_reg_value(reg_name, func_obj)._to_api_object()
        except:
            log.log_error(traceback.format_exc())
            api_obj = binaryninja.function.RegisterValue()._to_api_object()
        result[0].state = api_obj.state
        result[0].value = api_obj.value
	def _get_incoming_flag_value(self, ctxt, reg, func, result):
		# FFI callback: same shape as _get_incoming_reg_value but for flag
		# values. NOTE(review): the name is resolved via get_reg_name even
		# though this handles flags -- presumably flags share the register
		# namespace here; confirm against the core API.
		try:
			func_obj = binaryninja.function.Function(handle = core.BNNewFunctionReference(func))
			reg_name = self.arch.get_reg_name(reg)
			api_obj = self.perform_get_incoming_flag_value(reg_name, func_obj)._to_api_object()
		except:
			log.log_error(traceback.format_exc())
			api_obj = binaryninja.function.RegisterValue()._to_api_object()
		result[0].state = api_obj.state
		result[0].value = api_obj.value
	def _get_incoming_var_for_parameter_var(self, ctxt, in_var, func, result):
		# FFI callback: map a parameter variable to the incoming variable
		# at the call site, writing the answer into `result`. On error the
		# input variable is echoed back unchanged.
		try:
			if func is None:
				func_obj = None
			else:
				func_obj = binaryninja.function.Function(handle = core.BNNewFunctionReference(func))
			in_var_obj = binaryninja.function.Variable(func_obj, in_var[0].type, in_var[0].index, in_var[0].storage)
			out_var = self.perform_get_incoming_var_for_parameter_var(in_var_obj, func_obj)
			result[0].type = out_var.source_type
			result[0].index = out_var.index
			result[0].storage = out_var.storage
		except:
			log.log_error(traceback.format_exc())
			result[0].type = in_var[0].type
			result[0].index = in_var[0].index
			result[0].storage = in_var[0].storage
	def _get_parameter_var_for_incoming_var(self, ctxt, in_var, func, result):
		# FFI callback: inverse of _get_incoming_var_for_parameter_var --
		# map an incoming variable back to the parameter variable. On error
		# the input variable is echoed back unchanged.
		try:
			if func is None:
				func_obj = None
			else:
				func_obj = binaryninja.function.Function(handle = core.BNNewFunctionReference(func))
			in_var_obj = binaryninja.function.Variable(func_obj, in_var[0].type, in_var[0].index, in_var[0].storage)
			out_var = self.perform_get_parameter_var_for_incoming_var(in_var_obj, func_obj)
			result[0].type = out_var.source_type
			result[0].index = out_var.index
			result[0].storage = out_var.storage
		except:
			log.log_error(traceback.format_exc())
			result[0].type = in_var[0].type
			result[0].index = in_var[0].index
			result[0].storage = in_var[0].storage
def perform_get_incoming_reg_value(self, reg, func):
reg_stack = self.arch.get_reg_stack_for_reg(reg)
if reg_stack is not None:
if reg == self.arch.reg_stacks[reg_stack].stack_top_reg:
return binaryninja.function.RegisterValue.constant(0)
return binaryninja.function.RegisterValue()
	def perform_get_incoming_flag_value(self, reg, func):
		# Default hook: flag values at function entry are undetermined.
		return binaryninja.function.RegisterValue()
	def perform_get_incoming_var_for_parameter_var(self, in_var, func):
		# Default hook: ask the core for the default mapping, attaching a
		# register name when the result lives in a register.
		in_buf = core.BNVariable()
		in_buf.type = in_var.source_type
		in_buf.index = in_var.index
		in_buf.storage = in_var.storage
		out_var = core.BNGetDefaultIncomingVariableForParameterVariable(self.handle, in_buf)
		name = None
		if (func is not None) and (out_var.type == VariableSourceType.RegisterVariableSourceType):
			name = func.arch.get_reg_name(out_var.storage)
		return binaryninja.function.Variable(func, out_var.type, out_var.index, out_var.storage, name)
	def perform_get_parameter_var_for_incoming_var(self, in_var, func):
		# Default hook: ask the core for the default incoming->parameter
		# variable mapping.
		in_buf = core.BNVariable()
		in_buf.type = in_var.source_type
		in_buf.index = in_var.index
		in_buf.storage = in_var.storage
		out_var = core.BNGetDefaultParameterVariableForIncomingVariable(self.handle, in_buf)
		return binaryninja.function.Variable(func, out_var.type, out_var.index, out_var.storage)
	def with_confidence(self, confidence):
		# Return a copy of this calling convention carrying the given
		# confidence value (shares the underlying core object).
		return CallingConvention(self.arch, handle = core.BNNewCallingConventionReference(self.handle),
			confidence = confidence)
def get_incoming_reg_value(self, reg, func):
reg_num = self.arch.get_reg_index(reg)
func_handle = None
if func is not None:
func_handle = func.handle
return binaryninja.function.RegisterValue(self.arch, core.BNGetIncomingRegisterValue(self.handle, reg_num, func_handle))
def get_incoming_flag_value(self, flag, func):
reg_num = self.arch.get_flag_index(flag)
func_handle = None
if func is not None:
func_handle = func.handle
return binaryninja.function.RegisterValue(self.arch, core.BNGetIncomingFlagValue(self.handle, reg_num, func_handle))
	def get_incoming_var_for_parameter_var(self, in_var, func):
		# Public API: map a parameter variable to its incoming variable via
		# the core, attaching a register name when the result is a register.
		in_buf = core.BNVariable()
		in_buf.type = in_var.source_type
		in_buf.index = in_var.index
		in_buf.storage = in_var.storage
		if func is None:
			func_obj = None
		else:
			func_obj = func.handle
		out_var = core.BNGetIncomingVariableForParameterVariable(self.handle, in_buf, func_obj)
		name = None
		if (func is not None) and (out_var.type == VariableSourceType.RegisterVariableSourceType):
			name = func.arch.get_reg_name(out_var.storage)
		return binaryninja.function.Variable(func, out_var.type, out_var.index, out_var.storage, name)
	def get_parameter_var_for_incoming_var(self, in_var, func):
		# Public API: map an incoming variable back to its parameter
		# variable via the core.
		in_buf = core.BNVariable()
		in_buf.type = in_var.source_type
		in_buf.index = in_var.index
		in_buf.storage = in_var.storage
		if func is None:
			func_obj = None
		else:
			func_obj = func.handle
		out_var = core.BNGetParameterVariableForIncomingVariable(self.handle, in_buf, func_obj)
		return binaryninja.function.Variable(func, out_var.type, out_var.index, out_var.storage)
	@property
	def arch(self):
		"""The architecture this calling convention belongs to (read/write)"""
		return self._arch
	@arch.setter
	def arch(self, value):
		# Plain assignment; no validation is performed on the new value.
		self._arch = value
| 38.113537 | 154 | 0.763233 |
8db73a8dc3102c724a1e69dcd463e69c5cca2515 | 3,076 | py | Python | export.py | AlmogCohen/yogasiteorg-export | 10d8ac3a3bf082c2485237d97dfb531dc178a05c | [
"MIT"
] | null | null | null | export.py | AlmogCohen/yogasiteorg-export | 10d8ac3a3bf082c2485237d97dfb531dc178a05c | [
"MIT"
] | null | null | null | export.py | AlmogCohen/yogasiteorg-export | 10d8ac3a3bf082c2485237d97dfb531dc178a05c | [
"MIT"
] | null | null | null | import argparse
import re
import logging
from urlparse import urljoin
import requests
import sys
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
}
MAX_REDIRECTS = 3
logger = logging.getLogger(__name__)
YOGASITE_HOMEPAGE = 'https://yogasite.org'
def get_args():
    """Parse the command-line options for the yogasite.org export tool.

    Requires -u/--user, -a/--admin and -p/--password; -o/--output defaults
    to "dump.zip".
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--user', required=True, help="Yoga site username")
    parser.add_argument('-a', '--admin', required=True, help="Yoga site admin field")
    parser.add_argument('-p', '--password', required=True, help="Yoga site password")
    parser.add_argument('-o', '--output', default="dump.zip", help="Output dump filename")
    return parser.parse_args()
if __name__ == "__main__":
    # Script flow: log in, trigger a server-side MySQL dump, scrape the
    # resulting zip link from the backup page, and stream it to disk.
    # NOTE(review): this script targets Python 2 (`from urlparse import
    # urljoin` at the top); under Python 3 `backup_page.content` would be
    # bytes and the regex below would fail -- confirm before porting.
    logging.basicConfig(level=logging.INFO)
    args = get_args()
    data = {
        'pwd': args.password,
        'admin': args.admin,
        'username': args.user,
        'login': 'Log-in',
    }
    session = requests.session()
    session.max_redirects = MAX_REDIRECTS
    session.headers = HEADERS
    # Load the homepage to get the relevant session cookies
    homepage = session.get(YOGASITE_HOMEPAGE)
    logger.info("Login to website")
    login = session.post(YOGASITE_HOMEPAGE, data=data, allow_redirects=False)
    # Yoga site redirects (302) on successful login; anything else means
    # the credentials were rejected.
    is_login_successfull = login.status_code == 302
    if not is_login_successfull:
        logger.error("Unable to login into yogasite.org with username: {}. Please check the credentials provided".format(args.user))
        sys.exit()
    logger.info("Create new mysql dump")
    # This request will be blocking until the backup is ready
    create_mysql_dump = session.get(urljoin(YOGASITE_HOMEPAGE, '/admin_admin/mysqldump.php'))
    if not create_mysql_dump.status_code == 200:
        logger.error("Failed to inititate the sql dump back")
        sys.exit()
    logger.info("Find the newly created dump zip")
    # Now, locate the download link from the backup.php page
    backup_page = session.get(urljoin(YOGASITE_HOMEPAGE, '/admin_admin/backup.php'))
    if not backup_page.status_code == 200:
        logger.error("Failed getting the backup page while looking for the dump download link")
        sys.exit()
    # Links on the page are relative ("../path/to/dump.zip"); capture the
    # part after the leading "..".
    download_link = re.findall("\.\.(.+?\.zip)", backup_page.content)
    if not download_link:
        logger.error("Unable to find the zip download from the backup page")
        sys.exit()
    download_link = urljoin(YOGASITE_HOMEPAGE, download_link[0])
    logger.info("Download mysql dump from: {}".format(download_link))
    dump_stream = session.get(download_link, stream=True)
    if not dump_stream.status_code == 200:
        logger.error("Unable to download dump file from: {}".format(download_link))
        sys.exit()
    # Stream the zip to disk in 1 KiB chunks to keep memory use flat.
    with open(args.output, 'wb') as handle:
        for block in dump_stream.iter_content(1024):
            handle.write(block)
    logger.info("##### SUCCESS: Dump saved to: {}".format(args.output))
    logger.info("Done.")
6539d38528ea4be12e3a32fb7fbec2feb26f2eec | 5,989 | py | Python | model_generator.py | qwertyyuiopasdf/URLComSum | 85538224b7068c757f85c09b2f84663696586795 | [
"MIT"
] | null | null | null | model_generator.py | qwertyyuiopasdf/URLComSum | 85538224b7068c757f85c09b2f84663696586795 | [
"MIT"
] | null | null | null | model_generator.py | qwertyyuiopasdf/URLComSum | 85538224b7068c757f85c09b2f84663696586795 | [
"MIT"
] | null | null | null | from transformers.modeling_gpt2 import GPT2LMHeadModel, GPT2Config
import torch.utils.data.dataset
import utils_tokenizer
import torch, tqdm, math
def pad(data, padval=0):
    """Stack variable-length 1-D tensors into one batch-first tensor,
    right-padding shorter sequences with ``padval``."""
    padded = torch.nn.utils.rnn.pad_sequence(data, batch_first=True, padding_value=padval)
    return padded
class GeneTransformer:
    """GPT-2 based sequence generator/scorer.

    Wraps a ``GPT2LMHeadModel`` plus a tokenizer and provides batch
    preprocessing, teacher-forced training loss, and (un)conditional
    scoring of summaries.
    """
    def __init__(self, max_output_length=25, max_input_length=300, device='cpu', tokenizer_type='gpt2', bpe_model="", starter_model=None, word_count=None):
        # Pick the tokenizer and a matching GPT-2 configuration.
        if tokenizer_type == "gpt2":
            self.tokenizer = utils_tokenizer.GPT2Tokenizer()
            config = GPT2Config.from_pretrained("gpt2")
        elif tokenizer_type == "bpecap":
            self.tokenizer = utils_tokenizer.BPETokenizer(bpe_model)
            config = GPT2Config.from_dict({"finetuning_task": None, "initializer_range": 0.02,
                                           "layer_norm_epsilon": 1e-05, "n_ctx": 1024, "n_embd": 768, "n_head": 12, "n_layer": 12, "n_positions": 1024, "num_labels": 1,
                                           "resid_pdrop": 0.1, "use_bfloat16": False, "vocab_size": self.tokenizer.vocab_size})
        else:
            print("Tokenizer unrecognized. Should be gpt2 or bpecap.")
            exit()
        self.model = GPT2LMHeadModel(config)
        self.model.to(device)
        self.device = device
        if starter_model is not None:
            self.reload(starter_model)
        self.max_output_length = max_output_length
        self.max_input_length = max_input_length
        # Starts in training mode; call eval() before scoring.
        self.model.train()
        self.mode = "train"
        # Optional corpus word-frequency table used by score() for the
        # unconditional language-model prior.
        if word_count is not None:
            self.word_count = word_count
    def train_batch(self, bodies, summaries, special_append=None, no_preinput=False):
        # Teacher-forced LM loss of `summaries` conditioned on `bodies`
        # (conditioning is skipped when no_preinput is True).
        inputs, summ_inp, summ_out = self.preprocess_batch(bodies, summaries, special_append)
        past = None
        if not no_preinput:
            # Run the body through the model to build the conditioning cache.
            _, past = self.model(input_ids=inputs, past=None)
        #logits, _ = self.model(input_ids=summ_inp, past_key_values=past)
        logits, _ = self.model(input_ids=summ_inp, past=past)
        # ignore_index=-1 matches the -1 padding used for summ_out.
        crit = torch.nn.CrossEntropyLoss(ignore_index=-1)
        loss = crit(logits.view(-1, self.tokenizer.vocab_size), summ_out.contiguous().view(-1))
        return loss
    def train(self):
        # Switch the wrapped model (and this object) into training mode.
        self.model.train()
        self.mode = 'train'
    def eval(self):
        # Switch the wrapped model (and this object) into eval mode.
        self.model.eval()
        self.mode = 'eval'
    def reload(self, from_file):
        # Load model weights from disk; print() surfaces missing/unexpected keys.
        print(self.model.load_state_dict(torch.load(from_file)))
    def save(self, to_file):
        # Persist model weights to disk.
        torch.save(self.model.state_dict(), to_file)
    def preprocess_input(self, bodies, special_append=None):
        # Tokenize, optionally prefix each body with special tokens, pad,
        # and truncate to max_input_length.
        if special_append is None:
            special_append = [[] for i in range(len(bodies))]
        inputs = [torch.LongTensor(spe+self.tokenizer.encode(body)) for body, spe in zip(bodies, special_append)]
        inputs = pad(inputs, padval=0)
        inputs = inputs[:, :self.max_input_length].to(self.device)
        return inputs
    def preprocess_batch(self, bodies, summaries, special_append=None):
        # Build (encoder inputs, decoder inputs, decoder targets).
        # Decoder inputs are prefixed with start_id; targets are suffixed
        # with end_id and padded with -1 so the loss ignores padding.
        inputs = self.preprocess_input(bodies, special_append)
        # Big hack
        if special_append is None:
            special_append = [[] for i in range(len(bodies))]
        summaries = [spe+self.tokenizer.encode(summ) for summ, spe in zip(summaries, special_append)]
        summaries = [summ[:(self.max_output_length-1)] for summ in summaries] # We cut short, but we want the end token at the end
        summ_inp = pad([torch.LongTensor([self.tokenizer.start_id]+summ) for summ in summaries], padval=0).to(self.device)
        summ_out = pad([torch.LongTensor(summ+[self.tokenizer.end_id]) for summ in summaries], padval=-1).to(self.device)
        return inputs, summ_inp, summ_out
    def score(self, summaries, bodies, idx_batch=None, bodies_tokenized=None, lengths=None, extra=None):
        # Unconditional rating of the summaries
        # Combines per-token LM loss with a unigram prior built from
        # self.word_count; returns (scores, None).
        self.model.eval()
        inputs, summ_inp, summ_out = self.preprocess_batch(bodies, summaries)
        summ_out = summ_out.contiguous()
        with torch.no_grad():
            # NOTE(review): the [:1024] slice trims the *batch* dimension,
            # not the sequence length -- confirm this is intentional.
            logits, _ = self.model(input_ids=summ_inp[:1024], past=None)
        crit = torch.nn.CrossEntropyLoss(ignore_index=-1, reduction='none')
        loss = crit(logits.view(-1, self.tokenizer.vocab_size), summ_out.view(-1)).view(summ_out.shape)
        mask = (summ_inp != torch.LongTensor([0]).to(self.device)).float()
        non_pad_count = torch.sum(mask, dim=1)
        p_us = []
        total_word = sum(self.word_count.values())
        for idx, summary in enumerate(summaries):
            tokens = self.tokenizer.encode(' '.join(summary))
            p_u = 1
            for token in tokens:
                try:
                    p_u *= self.word_count[token]/total_word
                except:
                    p_u *= 1 /total_word#in case the word is not found in the training dataset
            # +0.001 guards against log(0) for very unlikely summaries.
            p_us.append(math.log(p_u+0.001))
        p_us = torch.tensor(p_us).to(self.device)
        loss_per = (torch.sum(loss, dim=1) - p_us)/ non_pad_count
        # Map the per-token loss onto a roughly [0, 1] score scale.
        score = (10.0 - loss_per) / 10.0
        return score.tolist(), None
    def score_pairs(self, bodies, summaries):
        # Per-example conditional LM loss of each summary given its body
        # (lower is better).
        if self.mode != 'eval':
            print("BEWARE. Model is not in eval mode.")
        inputs, summ_inp, summ_out = self.preprocess_batch(bodies, summaries)
        with torch.no_grad():
            _, past = self.model(input_ids=inputs, past=None)
            logits, _ = self.model(input_ids=summ_inp, past=past)
        crit = torch.nn.CrossEntropyLoss(ignore_index=-1, reduction='none')
        loss = crit(logits.view(-1, self.tokenizer.vocab_size), summ_out.view(-1)).view(summ_out.shape)
        mask = (summ_inp != torch.LongTensor([0]).to(self.device)).float()
        non_pad_count = torch.sum(mask, dim=1)
        loss_per = torch.sum(loss, dim=1) / non_pad_count
        return loss_per.tolist()
| 41.020548 | 155 | 0.62715 |
b22a7352ec427316337e77e09a52d1916fd2fbf9 | 8,499 | py | Python | transformers/recommendations/matrixfactorization.py | ucds-sg/h2oai | 7042860767dc25d1a7d7122103bbd5016d02df53 | [
"Apache-2.0"
] | null | null | null | transformers/recommendations/matrixfactorization.py | ucds-sg/h2oai | 7042860767dc25d1a7d7122103bbd5016d02df53 | [
"Apache-2.0"
] | null | null | null | transformers/recommendations/matrixfactorization.py | ucds-sg/h2oai | 7042860767dc25d1a7d7122103bbd5016d02df53 | [
"Apache-2.0"
] | null | null | null | """Collaborative filtering features using various techniques of Matrix Factorization for recommendations.
Recommended for large data"""
"""
Add the user column name and item column name in recipe_dict in config to match the
column names as per the dataset or use the default 'user' and 'item' respectively in your dataset
Sample Datasets
# Netflix - https://www.kaggle.com/netflix-inc/netflix-prize-data
recipe_dict = "{'user_col': 'user', 'item_col': 'movie'}"
# MovieLens - https://grouplens.org/datasets/movielens/
recipe_dict = "{'user_col': 'userId', 'item_col': 'movieId'}"
# RPackages - https://www.kaggle.com/c/R/data
recipe_dict = "{'user_col': 'User', 'item_col': 'Package'}"
"""
import datatable as dt
import numpy as np
import pandas as pd
import h2o4gpu
import scipy
from h2oaicore.systemutils import config
from h2oaicore.transformer_utils import CustomTransformer
from sklearn.decomposition import NMF
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.preprocessing import LabelEncoder
class RecH2OMFTransformer(CustomTransformer):
    """Collaborative-filtering feature via matrix factorization.

    Produces out-of-fold predicted ratings for (user, item) pairs using
    either h2o4gpu's ALS factorization (this class) or sklearn NMF (see
    subclass), selected by the ``_mf_type`` class attribute.
    """
    _multiclass = False
    _can_use_gpu = True
    _mf_type = "h2o4gpu"
    def __init__(self, n_components=50, _lambda=0.1, batches=1, max_iter=100, alpha=0.1, **kwargs):
        super().__init__(**kwargs)
        # Column names come from config.recipe_dict, falling back to
        # "user"/"item".
        self.user_col = config.recipe_dict['user_col'] if "user_col" in config.recipe_dict else "user"
        self.item_col = config.recipe_dict['item_col'] if "item_col" in config.recipe_dict else "item"
        # Only the hyperparameters relevant to the selected backend are kept.
        if self.__class__._mf_type == "h2o4gpu":
            self._n_components = n_components
            self._lambda = _lambda
            self._batches = batches
            self._max_iter = max_iter
        elif self.__class__._mf_type == "nmf":
            self._n_components = n_components
            self._alpha = alpha
            self._max_iter = max_iter
    @staticmethod
    def do_acceptance_test():
        # Skipped: needs a dataset with user/item columns.
        return False
    @staticmethod
    def get_default_properties():
        return dict(col_type="all", min_cols="all", max_cols="all", relative_importance=1, num_default_instances=1)
    @staticmethod
    def get_parameter_choices():
        # Hyperparameter search space for Driverless AI.
        return {"n_components": [10, 30, 50, 70, 100],
                "_lambda": [0.01, 0.05, 0.1],
                "batches": [1],
                "max_iter": [10, 50, 100, 200],
                "alpha": [0.01, 0.05, 0.1]}
    def fit_transform(self, X: dt.Frame, y: np.array = None):
        # Binary targets are label-encoded to {0, 1}; everything else is
        # treated as a float rating.
        if len(np.unique(self.labels)) == 2:
            le = LabelEncoder()
            self.labels = le.fit_transform(self.labels)
            y = np.array(le.transform(y), dtype="float32")
        else:
            y = np.array(y, dtype="float32")
        X = X[:, [self.user_col, self.item_col]]
        # Encode users/items to contiguous integer ids; the encoders are
        # kept for transform().
        self.user_le = LabelEncoder()
        self.item_le = LabelEncoder()
        X[:, self.user_col] = dt.Frame(self.user_le.fit_transform(X[:, self.user_col]))
        X[:, self.item_col] = dt.Frame(self.item_le.fit_transform(X[:, self.item_col]))
        X_pd = X.to_pandas()
        if len(np.unique(self.labels)) == 2:
            kfold = StratifiedKFold(n_splits=10)
        else:
            kfold = KFold(n_splits=10)
        # Out-of-fold predictions; rows whose user or item is unseen in a
        # fold's training split stay NaN.
        preds = np.full(X.nrows, fill_value=np.nan)
        for train_index, val_index in kfold.split(X_pd, y):
            X_train, y_train = X_pd.iloc[train_index,], y[train_index]
            X_val, y_val = X_pd.iloc[val_index,], y[val_index]
            # Keep only validation pairs whose user AND item appear in the
            # training split (cold-start rows cannot be scored).
            X_val2 = X_val[(X_val[self.user_col].isin(np.unique(X_train[self.user_col]))) & (
                    X_val[self.item_col].isin(np.unique(X_train[self.item_col])))]
            y_val2 = y_val[(X_val[self.user_col].isin(np.unique(X_train[self.user_col]))) & (
                    X_val[self.item_col].isin(np.unique(X_train[self.item_col])))]
            X_panel = pd.concat([X_train, X_val2], axis=0)
            users, user_indices = np.unique(np.array(X_panel[self.user_col], dtype="int32"), return_inverse=True)
            items, item_indices = np.unique(np.array(X_panel[self.item_col], dtype="int32"), return_inverse=True)
            # Sparse user x item rating matrix built from the training rows.
            X_train_user_item_matrix = scipy.sparse.coo_matrix(
                (y_train, (user_indices[:len(X_train)], item_indices[:len(X_train)])), shape=(len(users), len(items)))
            X_train_shape = X_train_user_item_matrix.shape
            # Indicator matrix marking which (user, item) cells to predict.
            X_val_user_item_matrix = scipy.sparse.coo_matrix(
                (np.ones(len(X_val2), dtype="float32"), (user_indices[len(X_train):], item_indices[len(X_train):])),
                shape=X_train_shape)
            if self.__class__._mf_type == "h2o4gpu":
                factorization = h2o4gpu.solvers.FactorizationH2O(self._n_components, self._lambda,
                                                                 max_iter=self._max_iter)
                factorization.fit(X_train_user_item_matrix, X_BATCHES=self._batches, THETA_BATCHES=self._batches)
                preds[val_index[(X_val[self.user_col].isin(np.unique(X_train[self.user_col]))) & (
                        X_val[self.item_col].isin(np.unique(X_train[self.item_col])))]] = factorization.predict(
                    X_val_user_item_matrix).data
            elif self.__class__._mf_type == "nmf":
                factorization = NMF(n_components=self._n_components, alpha=self._alpha, max_iter=self._max_iter)
                user_matrix = factorization.fit_transform(X_train_user_item_matrix)
                item_matrix = factorization.components_.T
                # Predicted rating = dot(user factors, item factors).
                val_users = np.take(user_matrix, X_val_user_item_matrix.row, axis=0)
                val_items = np.take(item_matrix, X_val_user_item_matrix.col, axis=0)
                preds[val_index[(X_val[self.user_col].isin(np.unique(X_train[self.user_col]))) & (
                        X_val[self.item_col].isin(np.unique(X_train[self.item_col])))]] = np.sum(val_users * val_items,
                                                                                                axis=1)
        # Fit the final model used at transform() time.
        # NOTE(review): this reuses y_train/X_train from the LAST fold only
        # rather than the full dataset -- confirm this is intentional.
        users, user_indices = np.unique(np.array(X_pd[self.user_col], dtype="int32"), return_inverse=True)
        items, item_indices = np.unique(np.array(X_pd[self.item_col], dtype="int32"), return_inverse=True)
        X_train_user_item_matrix = scipy.sparse.coo_matrix(
            (y_train, (user_indices[:len(X_train)], item_indices[:len(X_train)])), shape=(len(users), len(items)))
        self.X_train_shape = X_train_user_item_matrix.shape
        if self.__class__._mf_type == "h2o4gpu":
            self.factorization = h2o4gpu.solvers.FactorizationH2O(self._n_components, self._lambda,
                                                                  max_iter=self._max_iter)
            self.factorization.fit(X_train_user_item_matrix, X_BATCHES=self._batches, THETA_BATCHES=self._batches)
        elif self.__class__._mf_type == "nmf":
            factorization = NMF(n_components=self._n_components, alpha=self._alpha, max_iter=self._max_iter)
            self.user_matrix = factorization.fit_transform(X_train_user_item_matrix)
            self.item_matrix = factorization.components_.T
        return preds
    def transform(self, X: dt.Frame):
        # Score new (user, item) pairs; pairs with unseen users or items
        # stay NaN.
        X = X[:, [self.user_col, self.item_col]]
        preds = np.full(X.nrows, fill_value=np.nan)
        X_pd = X.to_pandas()
        X_test = X_pd[
            (X_pd[self.user_col].isin(self.user_le.classes_)) & (X_pd[self.item_col].isin(self.item_le.classes_))]
        X_test[self.user_col] = self.user_le.transform(X_test[self.user_col])
        X_test[self.item_col] = self.item_le.transform(X_test[self.item_col])
        X_test_user_item_matrix = scipy.sparse.coo_matrix(
            (np.ones(len(X_test), dtype="float32"), (X_test[self.user_col], X_test[self.item_col])),
            shape=self.X_train_shape)
        if self.__class__._mf_type == "h2o4gpu":
            preds[(X_pd[self.user_col].isin(self.user_le.classes_)) & (
                    X_pd[self.item_col].isin(self.item_le.classes_))] = self.factorization.predict(
                X_test_user_item_matrix).data
        elif self.__class__._mf_type == "nmf":
            test_users = np.take(self.user_matrix, X_test_user_item_matrix.row, axis=0)
            test_items = np.take(self.item_matrix, X_test_user_item_matrix.col, axis=0)
            preds[(X_pd[self.user_col].isin(self.user_le.classes_)) & (
                    X_pd[self.item_col].isin(self.item_le.classes_))] = np.sum(test_users * test_items, axis=1)
        return preds
class RecNMFTransformer(RecH2OMFTransformer):
    # Same recipe, but backed by sklearn's CPU-only NMF factorization.
    _can_use_gpu = False
    _mf_type = "nmf"
| 46.697802 | 118 | 0.638899 |
a05b9e80f6c1b9e8373cfd8576222dbfca68073f | 9,849 | py | Python | gssnng/score_funs.py | redst4r/gssnng | e8174e030cf85d6bade41dc26aa9c0f3929f3584 | [
"MIT"
] | null | null | null | gssnng/score_funs.py | redst4r/gssnng | e8174e030cf85d6bade41dc26aa9c0f3929f3584 | [
"MIT"
] | null | null | null | gssnng/score_funs.py | redst4r/gssnng | e8174e030cf85d6bade41dc26aa9c0f3929f3584 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import gssnng.util as si
import statsmodels.robust.scale
def summed_up(su):
    """Score a cell by the plain sum of its gene-set values.

    :param su: values (counts or ranks) of the genes in the gene set
    :return: the sum of ``su``
    """
    return np.sum(su)
def median_score(su):
    """Score a cell by the median of its gene-set values.

    :param su: values (counts or ranks) of the genes in the gene set
    :return: the median of ``su``
    """
    return np.median(su)
def average_score(su):
    """Score a cell by the arithmetic mean of its gene-set values.

    :param su: values (counts or ranks) of the genes in the gene set
    :return: the mean of ``su``
    """
    return np.mean(su)
def mean_z(allexprvals, genesetvals):
    """Mean absolute z-score of the gene-set values.

    Each gene-set value is standardized against the mean and standard
    deviation of ALL expression values, then the absolute z-scores are
    averaged.

    :param allexprvals: values for every gene (defines the reference
        distribution)
    :param genesetvals: values for the genes in the gene set
    :return: mean of |z| over ``genesetvals``
    """
    mu = np.mean(allexprvals)
    sigma = np.std(allexprvals)
    zscores = [np.abs(v - mu) / sigma for v in genesetvals]
    return np.mean(zscores)
def robust_std(exprdat, su):
    """Median of MAD-standardized gene-set values.

    Gene-set values are centered on the median of ALL values and scaled
    by the median absolute deviation (a robust analogue of the z-score);
    the score is the median of those absolute deviations.

    :param exprdat: values for every gene (defines center and spread)
    :param su: values for the genes in the gene set
    :return: median of |v - median| / MAD over ``su``
    """
    center = np.median(exprdat)
    spread = statsmodels.robust.scale.mad(exprdat)
    robust_z = [np.abs(v - center) / spread for v in su]
    return np.median(robust_z)
def rank_biased_overlap(x, exprcol, gs, geneset_genes, limit):
"""
Rank biased overlap method
:param x: the pandas data frame of ranks, all genes
:param su: the ranked list of genes *IN* the gene set
:param gs: the gene set
"""
rbo_score = 0.0
# if undirected, then sort based on centered values
if gs.mode == '?':
# center & absolute value ranks
maxN = np.ceil(len(x.index)/2.0)
x['undir'] = [ np.abs(xi - maxN) for xi in x[exprcol]]
exprcol = 'undir'
# get sorted dataframe
x_sorted = x.sort_values(by=exprcol, ascending=False)
y = x_sorted[exprcol]
for i in range(limit):
subset = set(y.index[0:(i+1)])
rbo_score += len(subset.intersection(geneset_genes))
return( rbo_score )
def singscore(x, su, sig_len, norm_method, gs):
    """
    The singscore method: mean rank of the gene-set genes, normalized for
    library size and signature length.

    :param x: the pandas data frame of ranks, all genes
    :param su: the ranked values of genes *IN* the gene set
    :param sig_len: the number of expressed genes matched in the set
    :param norm_method: 'standard' or 'theoretical'  # from singscore
    :param gs: gene set object; ``gs.mode`` selects directed vs undirected
    """
    if gs.mode == '?':
        # Undirected set: center ranks on the library midpoint and take
        # absolute values so both extremes score high.
        maxN = np.ceil(len(x.index)/2.0)
        su = [ np.abs(xi - maxN) for xi in su]
    mean_rank = np.mean(su)
    # normalise the score for the number of genes in the signature
    # (normalisation semantics live in gssnng.util).
    norm_up = si.normalisation(norm_method=norm_method,
                               gs_mode=gs.mode,
                               score=mean_rank,
                               library_len=len(x.index),
                               sig_len=sig_len)
    if gs.mode != '?':
        # Directed scores are recentered from [0, 1] to [-0.5, 0.5].
        norm_up = norm_up - 0.5
    return(norm_up)
def ssgsea(x, su, sig_len, omega, gs):
    """Single-sample GSEA enrichment score.

    Walks the expression-sorted gene list (highest first) and integrates
    the difference between the rank-weighted ECDF of genes inside the set
    and the uniform ECDF of genes outside it.

    :param x: pandas Series of expression values indexed by gene
    :param su: unused; kept for a uniform scoring-function signature
    :param sig_len: unused; kept for a uniform scoring-function signature
    :param omega: rank-weighting exponent
    :param gs: iterable of genes in the gene set
    :return: the summed ECDF difference (the enrichment score)
    """
    members = set(gs)
    ranked_genes = x.sort_values(axis=0, ascending=False, inplace=False).index.tolist()
    # Total weighted rank mass of in-set genes (in-set ECDF denominator).
    in_denom = sum(rank ** omega
                   for rank, gene in enumerate(ranked_genes, start=1)
                   if gene in members)
    out_denom = len(x) - len(members)
    in_mass = 0
    out_count = 0
    running = 0
    for rank, gene in enumerate(ranked_genes, start=1):
        if gene in members:
            in_mass += rank ** omega
        else:
            out_count += 1
        running += in_mass / in_denom - out_count / out_denom
    return running
def expr_format(x, exprcol, geneset_genes):
    """Collect the values of gene-set genes that are present in the table.

    :param x: pandas DataFrame indexed by gene
    :param exprcol: column of ``x`` to read values from
    :param geneset_genes: genes of the gene set
    :return: (values for the matched genes, number of matched genes)
    """
    present = [gene for gene in geneset_genes if gene in x.index]
    matched_values = [x[exprcol][gene] for gene in present]
    return( (matched_values, len(present)) )
def method_selector(gs, x, exprcol, geneset_genes, method, method_params):
    """
    Dispatch a single (cell, gene set) scoring request to the named method.

    :param gs: the gene set
    :param x: the gene expr data frame
    :param exprcol: the column containing values we'll compute on
    :param geneset_genes: genes in the gene set
    :param method: the method we'll call
    :param method_params: dictionary of method parameters
    :return: the method's score, or np.nan for an unknown method name
    """
    # su: gene-set values found in x; sig_len: how many genes matched.
    (su, sig_len) = expr_format(x, exprcol, geneset_genes)
    exprdat = x[exprcol]
    if method == 'singscore':
        res0 = singscore(exprdat, su, sig_len, method_params['normalization'], gs)
    elif method == 'robust_std':
        res0 = robust_std(exprdat, su)
    elif method == 'summed_up':
        res0 = summed_up(su)
    elif method == 'median_score':
        res0 = median_score(su)
    elif method == 'average_score':
        res0 = average_score(su)
    elif method == 'mean_z':
        res0 = mean_z(exprdat, su)
    elif method == 'rank_biased_overlap':
        # rank_biased_overlap needs the whole frame (it may fold ranks).
        res0 = rank_biased_overlap(x, exprcol, gs, geneset_genes, method_params['rbo_depth'])
    elif method == 'ssgsea':
        #x, su, sig_len, omega, gene_set
        res0 = ssgsea(exprdat, su, sig_len, method_params['omega'], geneset_genes)
    else:
        return(np.nan)
    return(res0)
def scorefun(gs,
             x,
             method,
             method_params,
             ranked):
    """
    Given a (possibly ranked) expression table, produce a gene-set score.

    Dispatches to method_selector() using the column(s) implied by the
    gene set's mode ('UP', 'DN', 'BOTH', '?') and whether the data are
    ranked; 'BOTH' sums the up- and down-signature scores.

    :param gs: the gene set
    :param x: the pandas data frame of ranks (or counts), all genes
    :param method: the scoring method name (see method_selector)
    :param method_params: dictionary of method parameters
    :param ranked: ranked data? True | False
    :return: a score, or np.nan if the mode is unrecognized or scoring fails
    """
    # BUG FIX: the original used ``except ():`` -- an empty exception tuple
    # catches nothing, so the NaN fallback was dead code; an unmatched mode
    # also left res0 unbound. Pre-seeding res0 and catching Exception
    # restores the intended "NaN on failure" contract.
    res0 = np.nan
    try:
        # Unranked data always score against the raw 'counts' column;
        # ranked data use the direction-specific rank columns.
        if ranked:
            upcol, dncol = 'uprank', 'dnrank'
        else:
            upcol = dncol = 'counts'
        if gs.mode == 'UP':
            res0 = method_selector(gs, x, upcol, gs.genes_up, method, method_params)
        elif gs.mode == 'DN':
            res0 = method_selector(gs, x, dncol, gs.genes_dn, method, method_params)
        elif gs.mode == 'BOTH':
            res0_up = method_selector(gs, x, upcol, gs.genes_up, method, method_params)
            res0_dn = method_selector(gs, x, dncol, gs.genes_dn, method, method_params)
            res0 = (res0_up + res0_dn)
        elif gs.mode == '?':
            # Undirected sets are stored on the "up" side.
            res0 = method_selector(gs, x, upcol, gs.genes_up, method, method_params)
    except Exception:
        res0 = np.nan
    return(res0)
| 31.977273 | 93 | 0.635394 |
41989de8922990395fec56253b6a58e2f8a87c35 | 90,770 | py | Python | svgpathtools/path.py | tianchishang/svgpathtools | 5d65d575b57d12b3d7c626e789fe1cb117e6e94e | [
"MIT"
] | 1 | 2017-08-04T03:22:57.000Z | 2017-08-04T03:22:57.000Z | svgpathtools/path.py | tianchishang/svgpathtools | 5d65d575b57d12b3d7c626e789fe1cb117e6e94e | [
"MIT"
] | null | null | null | svgpathtools/path.py | tianchishang/svgpathtools | 5d65d575b57d12b3d7c626e789fe1cb117e6e94e | [
"MIT"
] | 1 | 2019-02-13T06:42:44.000Z | 2019-02-13T06:42:44.000Z | """This submodule contains the class definitions of the the main five classes
svgpathtools is built around: Path, Line, QuadraticBezier, CubicBezier, and
Arc."""
# External dependencies
from __future__ import division, absolute_import, print_function
from math import sqrt, cos, sin, acos, degrees, radians, log, pi
from cmath import exp, sqrt as csqrt, phase
from collections import MutableSequence
from warnings import warn
from operator import itemgetter
import numpy as np
try:
from scipy.integrate import quad
_quad_available = True
except:
_quad_available = False
# Internal dependencies
from .bezier import (bezier_intersections, bezier_bounding_box, split_bezier,
bezier_by_line_intersections, polynomial2bezier)
from .misctools import BugException
from .polytools import rational_limit, polyroots, polyroots01, imag, real
# Default Parameters ##########################################################
# path segment .length() parameters for arc length computation
LENGTH_MIN_DEPTH = 5
LENGTH_ERROR = 1e-12
USE_SCIPY_QUAD = True # for elliptic Arc segment arc length computation
# path segment .ilength() parameters for inverse arc length computation
ILENGTH_MIN_DEPTH = 5
ILENGTH_ERROR = 1e-12
ILENGTH_S_TOL = 1e-12
ILENGTH_MAXITS = 10000
# compatibility/implementation related warnings and parameters
CLOSED_WARNING_ON = True
_NotImplemented4ArcException = \
Exception("This method has not yet been implemented for Arc objects.")
# _NotImplemented4QuadraticException = \
# Exception("This method has not yet been implemented for QuadraticBezier "
# "objects.")
_is_smooth_from_warning = \
("The name of this method is somewhat misleading (yet kept for "
"compatibility with scripts created using svg.path 2.0). This method "
"is meant only for d-string creation and should NOT be used to check "
"for kinks. To check a segment for differentiability, use the "
"joins_smoothly_with() method instead or the kinks() function (in "
"smoothing.py).\nTo turn off this warning, set "
"warning_on=False.")
# Miscellaneous ###############################################################
def bezier_segment(*bpoints):
    """Builds a Bezier segment object from control points.

    Returns a Line for 2 control points, a QuadraticBezier for 3, or a
    CubicBezier for 4.

    Raises:
        ValueError: if the number of control points is not 2, 3, or 4.
          (Previously this path was a bare `assert`, which is stripped
          under `python -O` and would silently return None.)
    """
    if len(bpoints) == 2:
        return Line(*bpoints)
    elif len(bpoints) == 3:
        return QuadraticBezier(*bpoints)
    elif len(bpoints) == 4:
        return CubicBezier(*bpoints)
    raise ValueError("bezier_segment() takes 2, 3, or 4 control points, "
                     "got {}.".format(len(bpoints)))
def is_bezier_segment(seg):
    """True iff `seg` is a Line, QuadraticBezier, or CubicBezier object."""
    return isinstance(seg, (Line, QuadraticBezier, CubicBezier))
def is_path_segment(seg):
    """True iff `seg` is any of the four segment types (including Arc)."""
    if isinstance(seg, Arc):
        return True
    return is_bezier_segment(seg)
def is_bezier_path(path):
    """Checks that all segments in path are a Line, QuadraticBezier, or
    CubicBezier object."""
    if not isinstance(path, Path):
        return False
    return all(is_bezier_segment(seg) for seg in path)
def concatpaths(list_of_paths):
    """Takes in a sequence of paths and returns their concatenations into a
    single path (following the order of the input sequence)."""
    segments = []
    for subpath in list_of_paths:
        segments.extend(subpath)
    return Path(*segments)
def bbox2path(xmin, xmax, ymin, ymax):
    """Converts a bounding box 4-tuple to a closed rectangular Path object,
    traversed bottom, right, top (reversed), left (reversed)."""
    bottom = Line(xmin + 1j*ymin, xmax + 1j*ymin)
    right = Line(xmax + 1j*ymin, xmax + 1j*ymax)
    top = Line(xmin + 1j*ymax, xmax + 1j*ymax)
    left = Line(xmin + 1j*ymin, xmin + 1j*ymax)
    return Path(bottom, right, top.reversed(), left.reversed())
def polyline(*points):
    """Converts a list of points to a Path composed of lines connecting those
    points (i.e. a linear spline or polyline). See also `polygon()`."""
    edges = []
    for here, there in zip(points[:-1], points[1:]):
        edges.append(Line(here, there))
    return Path(*edges)
def polygon(*points):
    """Converts a list of points to a Path composed of lines connecting those
    points, then closes the path by connecting the last point to the first.
    See also `polyline()`."""
    count = len(points)
    sides = []
    for k in range(count):
        # The modulus wraps the final edge back to points[0].
        sides.append(Line(points[k], points[(k + 1) % count]))
    return Path(*sides)
# Conversion###################################################################
def bpoints2bezier(bpoints):
    """Converts a list of length 2, 3, or 4 to a Line, QuadraticBezier, or
    CubicBezier object, respectively.
    See also: poly2bez.

    Raises:
        ValueError: if len(bpoints) is not 2, 3, or 4.  (Previously this
          path was a bare `assert`, which is stripped under `python -O`
          and would silently return None.)
    """
    order = len(bpoints) - 1
    if order == 3:
        return CubicBezier(*bpoints)
    elif order == 2:
        return QuadraticBezier(*bpoints)
    elif order == 1:
        return Line(*bpoints)
    raise ValueError("bpoints2bezier() requires 2, 3, or 4 control points, "
                     "got {}.".format(len(bpoints)))
def poly2bez(poly, return_bpoints=False):
    """Converts a cubic or lower order Polynomial object (or a sequence of
    coefficients) to a CubicBezier, QuadraticBezier, or Line object as
    appropriate. If return_bpoints=True then this will instead only return
    the control points of the corresponding Bezier curve.
    Note: The inverse operation is available as a method of CubicBezier,
    QuadraticBezier and Line objects."""
    control_points = polynomial2bezier(poly)
    if return_bpoints:
        return control_points
    return bpoints2bezier(control_points)
def bez2poly(bez, numpy_ordering=True, return_poly1d=False):
    """Converts a Bezier object or tuple of Bezier control points to a tuple
    of coefficients of the expanded polynomial.
    return_poly1d : returns a numpy.poly1d object. This makes computations
    of derivatives/anti-derivatives and many other operations quite quick.
    numpy_ordering : By default (to accommodate numpy) the coefficients will
    be output in reverse standard order.
    Note: This function is redundant thanks to the .poly() method included
    with all bezier segment classes."""
    control_points = bez.bpoints() if is_bezier_segment(bez) else bez
    return bezier2polynomial(control_points,
                             numpy_ordering=numpy_ordering,
                             return_poly1d=return_poly1d)
# Geometric####################################################################
def rotate(curve, degs, origin=None):
    """Returns curve rotated by `degs` degrees (CCW) around the point `origin`
    (a complex number). By default origin is either `curve.point(0.5)`, or in
    the case that curve is an Arc object, `origin` defaults to `curve.center`.

    Raises:
        TypeError: if `curve` is not a Path, Line, QuadraticBezier,
          CubicBezier, or Arc object.
    """
    def transform(z):
        # Rotate z about `origin` by `degs` degrees CCW (complex rotation).
        return exp(1j*radians(degs))*(z - origin) + origin

    # Fixed: compare to None with `is`, not `==` (PEP 8; `==` would invoke
    # an arbitrary __eq__ on `origin`).
    if origin is None:
        if isinstance(curve, Arc):
            origin = curve.center
        else:
            origin = curve.point(0.5)

    if isinstance(curve, Path):
        return Path(*[rotate(seg, degs, origin=origin) for seg in curve])
    elif is_bezier_segment(curve):
        return bpoints2bezier([transform(bpt) for bpt in curve.bpoints()])
    elif isinstance(curve, Arc):
        new_start = transform(curve.start)
        new_end = transform(curve.end)
        # For an Arc, rotating the endpoints is not enough: the ellipse's
        # axis rotation angle must be advanced by the same amount.
        new_rotation = curve.rotation + degs
        return Arc(new_start, radius=curve.radius, rotation=new_rotation,
                   large_arc=curve.large_arc, sweep=curve.sweep, end=new_end)
    else:
        raise TypeError("Input `curve` should be a Path, Line, "
                        "QuadraticBezier, CubicBezier, or Arc object.")
def translate(curve, z0):
    """Shifts the curve by the complex quantity z such that
    translate(curve, z0).point(t) = curve.point(t) + z0"""
    if isinstance(curve, Path):
        return Path(*[translate(seg, z0) for seg in curve])
    if is_bezier_segment(curve):
        shifted_bpoints = [pt + z0 for pt in curve.bpoints()]
        return bpoints2bezier(shifted_bpoints)
    if isinstance(curve, Arc):
        # Translation preserves radius, axis rotation, and the arc flags;
        # only the endpoints move.
        return Arc(curve.start + z0, radius=curve.radius,
                   rotation=curve.rotation, large_arc=curve.large_arc,
                   sweep=curve.sweep, end=curve.end + z0)
    raise TypeError("Input `curve` should be a Path, Line, "
                    "QuadraticBezier, CubicBezier, or Arc object.")
def bezier_unit_tangent(seg, t):
    """Returns the unit tangent of the segment at t.
    Notes
    -----
    If you receive a RuntimeWarning, try the following:
    >>> import numpy
    >>> old_numpy_error_settings = numpy.seterr(invalid='raise')
    This can be undone with:
    >>> numpy.seterr(**old_numpy_error_settings)
    """
    assert 0 <= t <= 1
    dseg = seg.derivative(t)
    # Note: dseg might be numpy value, use np.seterr(invalid='raise')
    try:
        # Normal case: nonzero derivative; just normalize it.
        unit_tangent = dseg/abs(dseg)
    except (ZeroDivisionError, FloatingPointError):
        # This may be a removable singularity, if so we just need to compute
        # the limit.
        # Note: limit{{dseg / abs(dseg)} = sqrt(limit{dseg**2 / abs(dseg)**2})
        dseg_poly = seg.poly().deriv()
        dseg_abs_squared_poly = (real(dseg_poly) ** 2 +
                                 imag(dseg_poly) ** 2)
        try:
            # rational_limit evaluates limit{dseg**2/|dseg|**2} at t; its
            # complex square root recovers the unit tangent direction.
            unit_tangent = csqrt(rational_limit(dseg_poly**2,
                                            dseg_abs_squared_poly, t))
        except ValueError:
            # Limit does not exist (e.g. a genuine kink/cusp): report the
            # one-sided derivative values to help the caller diagnose.
            bef = seg.poly().deriv()(t - 1e-4)
            aft = seg.poly().deriv()(t + 1e-4)
            mes = ("Unit tangent appears to not be well-defined at "
                   "t = {}, \n".format(t) +
                   "seg.poly().deriv()(t - 1e-4) = {}\n".format(bef) +
                   "seg.poly().deriv()(t + 1e-4) = {}".format(aft))
            raise ValueError(mes)
    return unit_tangent
def segment_curvature(self, t, use_inf=False):
    """returns the curvature of the segment at t.
    Notes
    -----
    `use_inf` is accepted for API compatibility but is not used in this
    implementation.
    If you receive a RuntimeWarning, run command
    >>> old = np.seterr(invalid='raise')
    This can be undone with
    >>> np.seterr(**old)
    """
    dz = self.derivative(t)
    ddz = self.derivative(t, n=2)
    dx, dy = dz.real, dz.imag
    ddx, ddy = ddz.real, ddz.imag
    # Make invalid float operations raise (instead of yielding nan) so a
    # vanishing tangent vector is caught by the except-branch below.
    old_np_seterr = np.seterr(invalid='raise')
    try:
        # Standard planar curvature formula: |x'y'' - y'x''| / |(x',y')|^3.
        kappa = abs(dx*ddy - dy*ddx)/sqrt(dx*dx + dy*dy)**3
    except (ZeroDivisionError, FloatingPointError):
        # tangent vector is zero at t, use polytools to find limit
        p = self.poly()
        dp = p.deriv()
        ddp = dp.deriv()
        dx, dy = real(dp), imag(dp)
        ddx, ddy = real(ddp), imag(ddp)
        f2 = (dx*ddy - dy*ddx)**2
        g2 = (dx*dx + dy*dy)**3
        lim2 = rational_limit(f2, g2, t)
        if lim2 < 0:  # impossible, must be numerical error
            return 0
        kappa = sqrt(lim2)
    finally:
        # Always restore the caller's numpy error settings.
        np.seterr(**old_np_seterr)
    return kappa
def bezier_radialrange(seg, origin, return_all_global_extrema=False):
    """returns the tuples (d_min, t_min) and (d_max, t_max) which minimize and
    maximize, respectively, the distance d = |self.point(t)-origin|.
    return_all_global_extrema: Multiple such t_min or t_max values can exist.
    By default, this will only return one. Set return_all_global_extrema=True
    to return all such global extrema."""
    # Distance extrema occur at the endpoints or where d(r^2)/dt = 0.
    shifted_poly = seg.poly() - origin
    r_squared = real(shifted_poly) ** 2 + imag(shifted_poly) ** 2
    candidate_ts = [0, 1] + polyroots01(r_squared.deriv())
    extrema = [(abs(seg.point(tau) - origin), tau) for tau in candidate_ts]
    if return_all_global_extrema:
        raise NotImplementedError
    global_min = min(extrema, key=itemgetter(0))
    global_max = max(extrema, key=itemgetter(0))
    return global_min, global_max
def closest_point_in_path(pt, path):
    """returns (|path.seg.point(t)-pt|, t, seg_idx) where t and seg_idx
    minimize the distance between pt and curve path[idx].point(t) for 0<=t<=1
    and any seg_idx.
    Warning: Multiple such global minima can exist. This will only return
    one."""
    # radialrange() yields (global_min, global_max); keep the minimum.
    extrema = path.radialrange(pt)
    return extrema[0]
def farthest_point_in_path(pt, path):
    """returns (|path.seg.point(t)-pt|, t, seg_idx) where t and seg_idx
    maximize the distance between pt and curve path[idx].point(t) for 0<=t<=1
    and any seg_idx.
    :rtype : object
    :param pt:
    :param path:
    Warning: Multiple such global maxima can exist. This will only return
    one."""
    # radialrange() yields (global_min, global_max); keep the maximum.
    extrema = path.radialrange(pt)
    return extrema[1]
def path_encloses_pt(pt, opt, path):
    """returns true if pt is a point enclosed by path (which must be a Path
    object satisfying path.isclosed==True). opt is a point you know is
    NOT enclosed by path."""
    assert path.isclosed()
    # Ray-casting: a segment from an inside point to a known-outside point
    # crosses the boundary an odd number of times.
    crossings = Path(Line(pt, opt)).intersect(path)
    return bool(len(crossings) % 2)
def segment_length(curve, start, end, start_point, end_point,
                   error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH, depth=0):
    """Recursively approximates the length by straight lines"""
    t_mid = (start + end)/2
    mid_point = curve.point(t_mid)
    chord = abs(end_point - start_point)
    two_chords = abs(mid_point - start_point) + abs(end_point - mid_point)
    # Accept the estimate once halving no longer improves it and we have
    # subdivided at least min_depth times.
    if two_chords - chord <= error and depth >= min_depth:
        return two_chords
    # Otherwise refine each half independently.
    return (segment_length(curve, start, t_mid, start_point, mid_point,
                           error, min_depth, depth + 1) +
            segment_length(curve, t_mid, end, mid_point, end_point,
                           error, min_depth, depth + 1))
def inv_arclength(curve, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS,
                  error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH):
    """INPUT: curve should be a CubicBezier, Line, of Path of CubicBezier
    and/or Line objects.
    OUTPUT: Returns a float, t, such that the arc length of curve from 0 to
    t is approximately s.
    s_tol - exit when |s(t) - s| < s_tol where
    s(t) = seg.length(0, t, error, min_depth) and seg is either curve or,
    if curve is a Path object, then seg is a segment in curve.
    error - used to compute lengths of cubics and arcs
    min_depth - used to compute lengths of cubics and arcs
    Note: This function is not designed to be efficient, but if it's slower
    than you need, make sure you have scipy installed."""
    curve_length = curve.length(error=error, min_depth=min_depth)
    assert curve_length > 0
    if not 0 <= s <= curve_length:
        raise ValueError("s is not in interval [0, curve.length()].")
    # Trivial endpoints: no search needed.
    if s == 0:
        return 0
    if s == curve_length:
        return 1
    if isinstance(curve, Path):
        seg_lengths = [seg.length(error=error, min_depth=min_depth) for seg in curve]
        lsum = 0
        # Find which segment the point we search for is located on
        for k, len_k in enumerate(seg_lengths):
            if lsum <= s <= lsum + len_k:
                # Solve within segment k, then convert the segment-local
                # parameter t to a global Path parameter via t2T().
                t = inv_arclength(curve[k], s - lsum, s_tol=s_tol, maxits=maxits, error=error, min_depth=min_depth)
                return curve.t2T(k, t)
            lsum += len_k
        return 1
    elif isinstance(curve, Line):
        # A Line has constant speed, so the inverse is exact.
        return s / curve.length(error=error, min_depth=min_depth)
    elif (isinstance(curve, QuadraticBezier) or
          isinstance(curve, CubicBezier) or
          isinstance(curve, Arc)):
        # Bisection search: arc length s(t) is non-decreasing in t.
        t_upper = 1
        t_lower = 0
        iteration = 0
        while iteration < maxits:
            iteration += 1
            t = (t_lower + t_upper)/2
            s_t = curve.length(t1=t, error=error, min_depth=min_depth)
            if abs(s_t - s) < s_tol:
                return t
            elif s_t < s:  # t too small
                t_lower = t
            else:  # s < s_t, t too big
                t_upper = t
            if t_upper == t_lower:
                # The bracket has collapsed to a single float: no better
                # answer is representable, so return it with a warning.
                warn("t is as close as a float can be to the correct value, "
                     "but |s(t) - s| = {} > s_tol".format(abs(s_t-s)))
                return t
        raise Exception("Maximum iterations reached with s(t) - s = {}."
                        "".format(s_t - s))
    else:
        raise TypeError("First argument must be a Line, QuadraticBezier, "
                        "CubicBezier, Arc, or Path object.")
# Operations###################################################################
def crop_bezier(seg, t0, t1):
    """returns a cropped copy of this segment which starts at self.point(t0)
    and ends at self.point(t1).

    Requires t0 < t1 (asserted below)."""
    assert t0 < t1
    if t0 == 0:
        # Cropping from the start: a single split suffices.
        cropped_seg = seg.split(t1)[0]
    elif t1 == 1:
        # Cropping to the end: a single split suffices.
        cropped_seg = seg.split(t0)[1]
    else:
        pt1 = seg.point(t1)
        # trim off the 0 <= t < t0 part
        trimmed_seg = crop_bezier(seg, t0, 1)
        # find the adjusted t1 (i.e. the t1 such that
        # trimmed_seg.point(t1) ~= pt))and trim off the t1 < t <= 1 part
        t1_adj = trimmed_seg.radialrange(pt1)[0][1]
        cropped_seg = crop_bezier(trimmed_seg, 0, t1_adj)
    return cropped_seg
# Main Classes ################################################################
class Line(object):
    """A straight line segment from the complex point `start` to `end`.

    As throughout this module, points in the plane are represented as
    complex numbers (x + 1j*y)."""
    def __init__(self, start, end):
        self.start = start
        self.end = end
    def __repr__(self):
        return 'Line(start=%s, end=%s)' % (self.start, self.end)
    def __eq__(self, other):
        if not isinstance(other, Line):
            return NotImplemented
        return self.start == other.start and self.end == other.end
    def __ne__(self, other):
        if not isinstance(other, Line):
            return NotImplemented
        return not self == other
    def __getitem__(self, item):
        # Indexing a Line indexes its control-point tuple (start, end).
        return self.bpoints()[item]
    def __len__(self):
        # A line has exactly two Bezier control points.
        return 2
    def joins_smoothly_with(self, previous, wrt_parameterization=False):
        """Checks if this segment joins smoothly with previous segment. By
        default, this only checks that this segment starts moving (at t=0) in
        the same direction (and from the same positive) as previous stopped
        moving (at t=1). To check if the tangent magnitudes also match, set
        wrt_parameterization=True."""
        if wrt_parameterization:
            return self.start == previous.end and np.isclose(
                self.derivative(0), previous.derivative(1))
        else:
            return self.start == previous.end and np.isclose(
                self.unit_tangent(0), previous.unit_tangent(1))
    def point(self, t):
        """returns the coordinates of the Bezier curve evaluated at t."""
        distance = self.end - self.start
        return self.start + distance*t
    def length(self, t0=0, t1=1, error=None, min_depth=None):
        """returns the length of the line segment between t0 and t1.

        `error` and `min_depth` are ignored; they exist so all segment
        types share the same .length() signature."""
        return abs(self.end - self.start)*(t1-t0)
    def ilength(self, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS,
                error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH):
        """Returns a float, t, such that self.length(0, t) is approximately s.
        See the inv_arclength() docstring for more details."""
        return inv_arclength(self, s, s_tol=s_tol, maxits=maxits, error=error,
                             min_depth=min_depth)
    def bpoints(self):
        """returns the Bezier control points of the segment."""
        return self.start, self.end
    def poly(self, return_coeffs=False):
        """returns the line as a Polynomial object."""
        p = self.bpoints()
        # Coefficients of start + (end - start)*t in numpy (highest-first)
        # ordering.
        coeffs = ([p[1] - p[0], p[0]])
        if return_coeffs:
            return coeffs
        else:
            return np.poly1d(coeffs)
    def derivative(self, t=None, n=1):
        """returns the nth derivative of the segment at t.

        The derivative of a line is constant, so `t` is ignored.
        Raises ValueError if n is not a positive integer."""
        assert self.end != self.start
        if n == 1:
            return self.end - self.start
        elif n > 1:
            return 0
        else:
            raise ValueError("n should be a positive integer.")
    def unit_tangent(self, t=None):
        """returns the unit tangent of the segment at t.

        Constant along the line, so `t` is ignored."""
        assert self.end != self.start
        dseg = self.end - self.start
        return dseg/abs(dseg)
    def normal(self, t=None):
        """returns the (right hand rule) unit normal vector to self at t."""
        return -1j*self.unit_tangent(t)
    def curvature(self, t):
        """returns the curvature of the line, which is always zero."""
        return 0
    # def icurvature(self, kappa):
    #     """returns a list of t-values such that 0 <= t<= 1 and
    #     seg.curvature(t) = kappa."""
    #     if kappa:
    #         raise ValueError("The .icurvature() method for Line elements will "
    #                          "return an empty list if kappa is nonzero and "
    #                          "will raise this exception when kappa is zero as "
    #                          "this is true at every point on the line.")
    #     return []
    def reversed(self):
        """returns a copy of the Line object with its orientation reversed."""
        return Line(self.end, self.start)
    def intersect(self, other_seg, tol=None):
        """Finds the intersections of two segments.
        returns a list of tuples (t1, t2) such that
        self.point(t1) == other_seg.point(t2).
        Note: This will fail if the two segments coincide for more than a
        finite collection of points.
        tol is not used."""
        if isinstance(other_seg, Line):
            assert other_seg.end != other_seg.start and self.end != self.start
            assert self != other_seg
            # Solve the system [p1-p0, q1-q0]*[t1, t2]^T = q0 - p0
            # where self == Line(p0, p1) and other_seg == Line(q0, q1)
            a = (self.start.real, self.end.real)
            b = (self.start.imag, self.end.imag)
            c = (other_seg.start.real, other_seg.end.real)
            d = (other_seg.start.imag, other_seg.end.imag)
            denom = ((a[1] - a[0])*(d[0] - d[1]) -
                     (b[1] - b[0])*(c[0] - c[1]))
            if denom == 0:
                # Parallel lines: no transversal intersection reported.
                return []
            t1 = (c[0]*(b[0] - d[1]) -
                  c[1]*(b[0] - d[0]) -
                  a[0]*(d[0] - d[1]))/denom
            t2 = -(a[1]*(b[0] - d[0]) -
                   a[0]*(b[1] - d[0]) -
                   c[0]*(b[0] - b[1]))/denom
            # Only report intersections inside both segments' [0, 1] range.
            if 0 <= t1 <= 1 and 0 <= t2 <= 1:
                return [(t1, t2)]
            return []
        elif isinstance(other_seg, QuadraticBezier):
            t2t1s = bezier_by_line_intersections(other_seg, self)
            return [(t1, t2) for t2, t1 in t2t1s]
        elif isinstance(other_seg, CubicBezier):
            t2t1s = bezier_by_line_intersections(other_seg, self)
            return [(t1, t2) for t2, t1 in t2t1s]
        elif isinstance(other_seg, Arc):
            # Delegate to Arc.intersect and swap the parameter order back.
            t2t1s = other_seg.intersect(self)
            return [(t1, t2) for t2, t1 in t2t1s]
        elif isinstance(other_seg, Path):
            raise TypeError(
                "other_seg must be a path segment, not a Path object, use "
                "Path.intersect().")
        else:
            raise TypeError("other_seg must be a path segment.")
    def bbox(self):
        """returns the bounding box for the segment in the form
        (xmin, xmax, ymin, ymax)."""
        xmin = min(self.start.real, self.end.real)
        xmax = max(self.start.real, self.end.real)
        ymin = min(self.start.imag, self.end.imag)
        ymax = max(self.start.imag, self.end.imag)
        return xmin, xmax, ymin, ymax
    def cropped(self, t0, t1):
        """returns a cropped copy of this segment which starts at
        self.point(t0) and ends at self.point(t1)."""
        return Line(self.point(t0), self.point(t1))
    def split(self, t):
        """returns two segments, whose union is this segment and which join at
        self.point(t)."""
        pt = self.point(t)
        return Line(self.start, pt), Line(pt, self.end)
    def radialrange(self, origin, return_all_global_extrema=False):
        """returns the tuples (d_min, t_min) and (d_max, t_max) which minimize
        and maximize, respectively, the distance d = |self.point(t)-origin|."""
        return bezier_radialrange(self, origin,
                return_all_global_extrema=return_all_global_extrema)
    def rotated(self, degs, origin=None):
        """Returns a copy of self rotated by `degs` degrees (CCW) around the
        point `origin` (a complex number). By default `origin` is either
        `self.point(0.5)`, or in the case that self is an Arc object,
        `origin` defaults to `self.center`."""
        return rotate(self, degs, origin=origin)
    def translated(self, z0):
        """Returns a copy of self shifted by the complex quantity `z0` such
        that self.translated(z0).point(t) = self.point(t) + z0 for any t."""
        return translate(self, z0)
class QuadraticBezier(object):
    """A quadratic Bezier curve segment.

    `start`, `control`, and `end` are points in the plane represented as
    complex numbers.  The full arc length is cached in `_length_info` and
    reused while the control points remain unchanged."""
    # For compatibility with old pickle files.
    _length_info = {'length': None, 'bpoints': None}

    def __init__(self, start, control, end):
        self.start = start
        self.end = end
        self.control = control
        # used to know if self._length needs to be updated
        self._length_info = {'length': None, 'bpoints': None}

    def __repr__(self):
        return 'QuadraticBezier(start=%s, control=%s, end=%s)' % (
            self.start, self.control, self.end)

    def __eq__(self, other):
        if not isinstance(other, QuadraticBezier):
            return NotImplemented
        return self.start == other.start and self.end == other.end \
            and self.control == other.control

    def __ne__(self, other):
        if not isinstance(other, QuadraticBezier):
            return NotImplemented
        return not self == other

    def __getitem__(self, item):
        # Indexing yields control points: 0=start, 1=control, 2=end.
        return self.bpoints()[item]

    def __len__(self):
        # A quadratic has exactly three Bezier control points.
        return 3

    def is_smooth_from(self, previous, warning_on=True):
        """[Warning: The name of this method is somewhat misleading (yet kept
        for compatibility with scripts created using svg.path 2.0). This
        method is meant only for d string creation and should not be used to
        check for kinks. To check a segment for differentiability, use the
        joins_smoothly_with() method instead.]"""
        if warning_on:
            warn(_is_smooth_from_warning)
        if isinstance(previous, QuadraticBezier):
            return (self.start == previous.end and
                    (self.control - self.start) == (
                        previous.end - previous.control))
        else:
            return self.control == self.start

    def joins_smoothly_with(self, previous, wrt_parameterization=False,
                            error=0):
        """Checks if this segment joins smoothly with previous segment. By
        default, this only checks that this segment starts moving (at t=0) in
        the same direction (and from the same positive) as previous stopped
        moving (at t=1). To check if the tangent magnitudes also match, set
        wrt_parameterization=True."""
        if wrt_parameterization:
            return self.start == previous.end and abs(
                self.derivative(0) - previous.derivative(1)) <= error
        else:
            return self.start == previous.end and abs(
                self.unit_tangent(0) - previous.unit_tangent(1)) <= error

    def point(self, t):
        """returns the coordinates of the Bezier curve evaluated at t."""
        return (1 - t)**2*self.start + 2*(1 - t)*t*self.control + t**2*self.end

    def length(self, t0=0, t1=1, error=None, min_depth=None):
        """returns the arc length between t0 and t1 (closed form).

        `error` and `min_depth` are unused; they exist for signature
        compatibility with the other segment types.  The full length
        (t0=0, t1=1) is cached in self._length_info."""
        # BUGFIX: the cache was previously checked/stored under the
        # condition (t0 == 1 and t1 == 0), so it never applied to the
        # standard full-length call; now consistent with
        # CubicBezier.length().
        if t0 == 0 and t1 == 1:
            if self._length_info['bpoints'] == self.bpoints():
                return self._length_info['length']

        a = self.start - 2*self.control + self.end
        b = 2*(self.control - self.start)
        a_dot_b = a.real*b.real + a.imag*b.imag

        if abs(a) < 1e-12:
            # Degenerate quadratic: the speed |b| is constant, so the
            # curve is a straight line traversed uniformly.
            s = abs(b)*(t1 - t0)
        elif abs(a_dot_b + abs(a)*abs(b)) < 1e-12:
            # The curve doubles back on itself at tstar; integrate the
            # speed piecewise around the turning point.
            tstar = abs(b)/(2*abs(a))
            if t1 < tstar:
                return abs(a)*(t0**2 - t1**2) - abs(b)*(t0 - t1)
            elif tstar < t0:
                return abs(a)*(t1**2 - t0**2) - abs(b)*(t1 - t0)
            else:
                return abs(a)*(t1**2 + t0**2) - abs(b)*(t1 + t0) + \
                       abs(b)**2/(2*abs(a))
        else:
            # General case: closed-form antiderivative of
            # |dq/dt| = sqrt(c2*t**2 + c1*t + c0).
            c2 = 4*(a.real**2 + a.imag**2)
            c1 = 4*a_dot_b
            c0 = b.real**2 + b.imag**2

            beta = c1/(2*c2)
            gamma = c0/c2 - beta**2

            dq1_mag = sqrt(c2*t1**2 + c1*t1 + c0)
            dq0_mag = sqrt(c2*t0**2 + c1*t0 + c0)
            logarand = (sqrt(c2)*(t1 + beta) + dq1_mag) / \
                       (sqrt(c2)*(t0 + beta) + dq0_mag)

            s = (t1 + beta)*dq1_mag - (t0 + beta)*dq0_mag + \
                gamma*sqrt(c2)*log(logarand)
            s /= 2

        if t0 == 0 and t1 == 1:
            self._length_info['length'] = s
            self._length_info['bpoints'] = self.bpoints()
            return self._length_info['length']
        else:
            return s

    def ilength(self, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS,
                error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH):
        """Returns a float, t, such that self.length(0, t) is approximately s.
        See the inv_arclength() docstring for more details."""
        return inv_arclength(self, s, s_tol=s_tol, maxits=maxits, error=error,
                             min_depth=min_depth)

    def bpoints(self):
        """returns the Bezier control points of the segment."""
        return self.start, self.control, self.end

    def poly(self, return_coeffs=False):
        """returns the quadratic as a Polynomial object."""
        p = self.bpoints()
        # Coefficients in numpy (highest-degree-first) ordering.
        coeffs = (p[0] - 2*p[1] + p[2], 2*(p[1] - p[0]), p[0])
        if return_coeffs:
            return coeffs
        else:
            return np.poly1d(coeffs)

    def derivative(self, t, n=1):
        """returns the nth derivative of the segment at t.
        Note: Bezier curves can have points where their derivative vanishes.
        If you are interested in the tangent direction, use the unit_tangent()
        method instead."""
        p = self.bpoints()
        if n == 1:
            return 2*((p[1] - p[0])*(1 - t) + (p[2] - p[1])*t)
        elif n == 2:
            return 2*(p[2] - 2*p[1] + p[0])
        elif n > 2:
            return 0
        else:
            raise ValueError("n should be a positive integer.")

    def unit_tangent(self, t):
        """returns the unit tangent vector of the segment at t (centered at
        the origin and expressed as a complex number). If the tangent
        vector's magnitude is zero, this method will find the limit of
        self.derivative(tau)/abs(self.derivative(tau)) as tau approaches t."""
        return bezier_unit_tangent(self, t)

    def normal(self, t):
        """returns the (right hand rule) unit normal vector to self at t."""
        return -1j*self.unit_tangent(t)

    def curvature(self, t):
        """returns the curvature of the segment at t."""
        return segment_curvature(self, t)

    def reversed(self):
        """returns a copy of the QuadraticBezier object with its orientation
        reversed."""
        new_quad = QuadraticBezier(self.end, self.control, self.start)
        if self._length_info['length']:
            # BUGFIX: copy the cache dict rather than aliasing it; the
            # 'bpoints' update below used to mutate self's cache as well,
            # invalidating the original segment's cached length.
            new_quad._length_info = dict(self._length_info)
            new_quad._length_info['bpoints'] = (
                self.end, self.control, self.start)
        return new_quad

    def intersect(self, other_seg, tol=1e-12):
        """Finds the intersections of two segments.
        returns a list of tuples (t1, t2) such that
        self.point(t1) == other_seg.point(t2).
        Note: This will fail if the two segments coincide for more than a
        finite collection of points."""
        if isinstance(other_seg, Line):
            return bezier_by_line_intersections(self, other_seg)
        elif isinstance(other_seg, QuadraticBezier):
            assert self != other_seg
            longer_length = max(self.length(), other_seg.length())
            return bezier_intersections(self, other_seg,
                                        longer_length=longer_length,
                                        tol=tol, tol_deC=tol)
        elif isinstance(other_seg, CubicBezier):
            longer_length = max(self.length(), other_seg.length())
            return bezier_intersections(self, other_seg,
                                        longer_length=longer_length,
                                        tol=tol, tol_deC=tol)
        elif isinstance(other_seg, Arc):
            # Delegate to Arc.intersect and swap the parameter order back.
            t2t1s = other_seg.intersect(self)
            return [(t1, t2) for t2, t1 in t2t1s]
        elif isinstance(other_seg, Path):
            raise TypeError(
                "other_seg must be a path segment, not a Path object, use "
                "Path.intersect().")
        else:
            raise TypeError("other_seg must be a path segment.")

    def bbox(self):
        """returns the bounding box for the segment in the form
        (xmin, xmax, ymin, ymax)."""
        return bezier_bounding_box(self)

    def split(self, t):
        """returns two segments, whose union is this segment and which join at
        self.point(t)."""
        bpoints1, bpoints2 = split_bezier(self.bpoints(), t)
        return QuadraticBezier(*bpoints1), QuadraticBezier(*bpoints2)

    def cropped(self, t0, t1):
        """returns a cropped copy of this segment which starts at
        self.point(t0) and ends at self.point(t1)."""
        return QuadraticBezier(*crop_bezier(self, t0, t1))

    def radialrange(self, origin, return_all_global_extrema=False):
        """returns the tuples (d_min, t_min) and (d_max, t_max) which minimize
        and maximize, respectively, the distance d = |self.point(t)-origin|."""
        return bezier_radialrange(self, origin,
                return_all_global_extrema=return_all_global_extrema)

    def rotated(self, degs, origin=None):
        """Returns a copy of self rotated by `degs` degrees (CCW) around the
        point `origin` (a complex number). By default `origin` is either
        `self.point(0.5)`, or in the case that self is an Arc object,
        `origin` defaults to `self.center`."""
        return rotate(self, degs, origin=origin)

    def translated(self, z0):
        """Returns a copy of self shifted by the complex quantity `z0` such
        that self.translated(z0).point(t) = self.point(t) + z0 for any t."""
        return translate(self, z0)
class CubicBezier(object):
# For compatibility with old pickle files.
_length_info = {'length': None, 'bpoints': None, 'error': None,
'min_depth': None}
    def __init__(self, start, control1, control2, end):
        self.start = start
        self.control1 = control1
        self.control2 = control2
        self.end = end
        # used to know if self._length needs to be updated
        self._length_info = {'length': None, 'bpoints': None, 'error': None,
                             'min_depth': None}
    def __repr__(self):
        return 'CubicBezier(start=%s, control1=%s, control2=%s, end=%s)' % (
            self.start, self.control1, self.control2, self.end)
    def __eq__(self, other):
        if not isinstance(other, CubicBezier):
            return NotImplemented
        return self.start == other.start and self.end == other.end \
            and self.control1 == other.control1 \
            and self.control2 == other.control2
    def __ne__(self, other):
        if not isinstance(other, CubicBezier):
            return NotImplemented
        return not self == other
    def __getitem__(self, item):
        # Indexing yields control points: 0=start, 1=control1, 2=control2,
        # 3=end.
        return self.bpoints()[item]
    def __len__(self):
        # A cubic has exactly four Bezier control points.
        return 4
    def is_smooth_from(self, previous, warning_on=True):
        """[Warning: The name of this method is somewhat misleading (yet kept
        for compatibility with scripts created using svg.path 2.0). This
        method is meant only for d string creation and should not be used to
        check for kinks. To check a segment for differentiability, use the
        joins_smoothly_with() method instead.]"""
        if warning_on:
            warn(_is_smooth_from_warning)
        if isinstance(previous, CubicBezier):
            # Smooth iff the handle into self mirrors the handle out of
            # previous (the SVG "S" shorthand condition).
            return (self.start == previous.end and
                    (self.control1 - self.start) == (
                        previous.end - previous.control2))
        else:
            return self.control1 == self.start
    def joins_smoothly_with(self, previous, wrt_parameterization=False):
        """Checks if this segment joins smoothly with previous segment. By
        default, this only checks that this segment starts moving (at t=0) in
        the same direction (and from the same positive) as previous stopped
        moving (at t=1). To check if the tangent magnitudes also match, set
        wrt_parameterization=True."""
        if wrt_parameterization:
            return self.start == previous.end and np.isclose(
                self.derivative(0), previous.derivative(1))
        else:
            return self.start == previous.end and np.isclose(
                self.unit_tangent(0), previous.unit_tangent(1))
def point(self, t):
"""Evaluate the cubic Bezier curve at t using Horner's rule."""
# algebraically equivalent to
# P0*(1-t)**3 + 3*P1*t*(1-t)**2 + 3*P2*(1-t)*t**2 + P3*t**3
# for (P0, P1, P2, P3) = self.bpoints()
return self.start + t*(
3*(self.control1 - self.start) + t*(
3*(self.start + self.control2) - 6*self.control1 + t*(
-self.start + 3*(self.control1 - self.control2) + self.end
)))
    def length(self, t0=0, t1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH):
        """Calculate the length of the path up to a certain position"""
        # Serve the cached full length if the control points are unchanged
        # and the cached result was computed at least as accurately.
        if t0 == 0 and t1 == 1:
            if self._length_info['bpoints'] == self.bpoints() \
                    and self._length_info['error'] >= error \
                    and self._length_info['min_depth'] >= min_depth:
                return self._length_info['length']
        # using scipy.integrate.quad is quick
        if _quad_available:
            s = quad(lambda tau: abs(self.derivative(tau)), t0, t1,
                            epsabs=error, limit=1000)[0]
        else:
            # Fallback: adaptive straight-line subdivision.
            s = segment_length(self, t0, t1, self.point(t0), self.point(t1),
                               error, min_depth, 0)
        if t0 == 0 and t1 == 1:
            self._length_info['length'] = s
            self._length_info['bpoints'] = self.bpoints()
            self._length_info['error'] = error
            self._length_info['min_depth'] = min_depth
            return self._length_info['length']
        else:
            return s
    def ilength(self, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS,
                error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH):
        """Returns a float, t, such that self.length(0, t) is approximately s.
        See the inv_arclength() docstring for more details."""
        return inv_arclength(self, s, s_tol=s_tol, maxits=maxits, error=error,
                             min_depth=min_depth)
    def bpoints(self):
        """returns the Bezier control points of the segment."""
        return self.start, self.control1, self.control2, self.end
    def poly(self, return_coeffs=False):
        """Returns the cubic as a Polynomial object."""
        p = self.bpoints()
        # Coefficients in numpy (highest-degree-first) ordering.
        coeffs = (-p[0] + 3*(p[1] - p[2]) + p[3],
                  3*(p[0] - 2*p[1] + p[2]),
                  3*(-p[0] + p[1]),
                  p[0])
        if return_coeffs:
            return coeffs
        else:
            return np.poly1d(coeffs)
    def derivative(self, t, n=1):
        """returns the nth derivative of the segment at t.
        Note: Bezier curves can have points where their derivative vanishes.
        If you are interested in the tangent direction, use the unit_tangent()
        method instead."""
        p = self.bpoints()
        if n == 1:
            return 3*(p[1] - p[0])*(1 - t)**2 + 6*(p[2] - p[1])*(1 - t)*t + 3*(
                p[3] - p[2])*t**2
        elif n == 2:
            return 6*(
                (1 - t)*(p[2] - 2*p[1] + p[0]) + t*(p[3] - 2*p[2] + p[1]))
        elif n == 3:
            # Third derivative of a cubic is constant.
            return 6*(p[3] - 3*(p[2] - p[1]) - p[0])
        elif n > 3:
            return 0
        else:
            raise ValueError("n should be a positive integer.")
    def unit_tangent(self, t):
        """returns the unit tangent vector of the segment at t (centered at
        the origin and expressed as a complex number). If the tangent
        vector's magnitude is zero, this method will find the limit of
        self.derivative(tau)/abs(self.derivative(tau)) as tau approaches t."""
        return bezier_unit_tangent(self, t)
    def normal(self, t):
        """returns the (right hand rule) unit normal vector to self at t."""
        return -1j * self.unit_tangent(t)
    def curvature(self, t):
        """returns the curvature of the segment at t."""
        return segment_curvature(self, t)
    # def icurvature(self, kappa):
    #     """returns a list of t-values such that 0 <= t<= 1 and
    #     seg.curvature(t) = kappa."""
    #     z = self.poly()
    #     x, y = real(z), imag(z)
    #     dx, dy = x.deriv(), y.deriv()
    #     ddx, ddy = dx.deriv(), dy.deriv()
    #
    #     p = kappa**2*(dx**2 + dy**2)**3 - (dx*ddy - ddx*dy)**2
    #     return polyroots01(p)
def reversed(self):
"""returns a copy of the CubicBezier object with its orientation
reversed."""
new_cub = CubicBezier(self.end, self.control2, self.control1,
self.start)
if self._length_info['length']:
new_cub._length_info = self._length_info
new_cub._length_info['bpoints'] = (
self.end, self.control2, self.control1, self.start)
return new_cub
    def intersect(self, other_seg, tol=1e-12):
        """Finds the intersections of two segments.
        returns a list of tuples (t1, t2) such that
        self.point(t1) == other_seg.point(t2).
        Note: This will fail if the two segments coincide for more than a
        finite collection of points."""
        if isinstance(other_seg, Line):
            return bezier_by_line_intersections(self, other_seg)
        elif (isinstance(other_seg, QuadraticBezier) or
              isinstance(other_seg, CubicBezier)):
            assert self != other_seg
            # Scale the subdivision tolerance by the longer curve.
            longer_length = max(self.length(), other_seg.length())
            return bezier_intersections(self, other_seg,
                                        longer_length=longer_length,
                                        tol=tol, tol_deC=tol)
        elif isinstance(other_seg, Arc):
            # Arc knows how to intersect Bezier segments; swap the
            # parameter pairs back into (t_self, t_other) order.
            t2t1s = other_seg.intersect(self)
            return [(t1, t2) for t2, t1 in t2t1s]
        elif isinstance(other_seg, Path):
            raise TypeError(
                "other_seg must be a path segment, not a Path object, use "
                "Path.intersect().")
        else:
            raise TypeError("other_seg must be a path segment.")
    def bbox(self):
        """returns the bounding box for the segment in the form
        (xmin, xmax, ymin, ymax)."""
        # Pure delegation to the generic Bezier bounding-box helper.
        return bezier_bounding_box(self)
def split(self, t):
"""returns two segments, whose union is this segment and which join at
self.point(t)."""
bpoints1, bpoints2 = split_bezier(self.bpoints(), t)
return CubicBezier(*bpoints1), CubicBezier(*bpoints2)
    def cropped(self, t0, t1):
        """returns a cropped copy of this segment which starts at
        self.point(t0) and ends at self.point(t1)."""
        # crop_bezier returns the new control points; rewrap as a curve.
        return CubicBezier(*crop_bezier(self, t0, t1))
    def radialrange(self, origin, return_all_global_extrema=False):
        """returns the tuples (d_min, t_min) and (d_max, t_max) which minimize
        and maximize, respectively, the distance d = |self.point(t)-origin|."""
        # Pure delegation to the shared Bezier radial-range helper.
        return bezier_radialrange(self, origin,
                return_all_global_extrema=return_all_global_extrema)
    def rotated(self, degs, origin=None):
        """Returns a copy of self rotated by `degs` degrees (CCW) around the
        point `origin` (a complex number).  By default `origin` is either
        `self.point(0.5)`, or in the case that self is an Arc object,
        `origin` defaults to `self.center`."""
        # Pure delegation to the module-level rotate() helper.
        return rotate(self, degs, origin=origin)
    def translated(self, z0):
        """Returns a copy of self shifted by the complex quantity `z0` such
        that self.translated(z0).point(t) = self.point(t) + z0 for any t."""
        # Pure delegation to the module-level translate() helper.
        return translate(self, z0)
class Arc(object):
    def __init__(self, start, radius, rotation, large_arc, sweep, end,
                 autoscale_radius=True):
        """
        This should be thought of as a part of an ellipse connecting two
        points on that ellipse, start and end.
        Parameters
        ----------
        start : complex
            The start point of the curve. Note: `start` and `end` cannot be the
            same.  To make a full ellipse or circle, use two `Arc` objects.
        radius : complex
            rx + 1j*ry, where rx and ry are the radii of the ellipse (also
            known as its semi-major and semi-minor axes, or vice-versa or if
            rx < ry).
            Note: If rx = 0 or ry = 0 then this arc is treated as a
            straight line segment joining the endpoints.
            Note: If rx or ry has a negative sign, the sign is dropped; the
            absolute value is used instead.
            Note:  If no such ellipse exists, the radius will be scaled so
            that one does (unless autoscale_radius is set to False).
        rotation : float
            This is the CCW angle (in degrees) from the positive x-axis of the
            current coordinate system to the x-axis of the ellipse.
        large_arc : bool
            Given two points on an ellipse, there are two elliptical arcs
            connecting those points, the first going the short way around the
            ellipse, and the second going the long way around the ellipse.  If
            `large_arc == False`, the shorter elliptical arc will be used.  If
            `large_arc == True`, then longer elliptical will be used.
            In other words, `large_arc` should be 0 for arcs spanning less than
            or equal to 180 degrees and 1 for arcs spanning greater than 180
            degrees.
        sweep : bool
            For any acceptable parameters `start`, `end`, `rotation`, and
            `radius`, there are two ellipses with the given major and minor
            axes (radii) which connect `start` and `end`.  One which connects
            them in a CCW fashion and one which connected them in a CW
            fashion.  If `sweep == True`, the CCW ellipse will be used.  If
            `sweep == False`, the CW ellipse will be used.  See note on curve
            orientation below.
        end : complex
            The end point of the curve. Note: `start` and `end` cannot be the
            same.  To make a full ellipse or circle, use two `Arc` objects.
        autoscale_radius : bool
            If `autoscale_radius == True`, then will also scale `self.radius`
            in the case that no ellipse exists with the input parameters
            (see inline comments for further explanation).

        Derived Parameters/Attributes
        -----------------------------
        self.theta : float
            This is the phase (in degrees) of self.u1transform(self.start).
            It is $\theta_1$ in the official documentation and ranges from
            -180 to 180.
        self.delta : float
            This is the angular distance (in degrees) between the start and
            end of the arc after the arc has been sent to the unit circle
            through self.u1transform().
            It is $\Delta\theta$ in the official documentation and ranges from
            -360 to 360; being positive when the arc travels CCW and negative
            otherwise (i.e. is positive/negative when sweep == True/False).
        self.center : complex
            This is the center of the arc's ellipse.
        self.phi : float
            The arc's rotation in radians, i.e. `radians(self.rotation)`.
        self.rot_matrix : complex
            Equal to `exp(1j * self.phi)` which is also equal to
            `cos(self.phi) + 1j*sin(self.phi)`.

        Note on curve orientation (CW vs CCW)
        -------------------------------------
        The notions of clockwise (CW) and counter-clockwise (CCW) are reversed
        in some sense when viewing SVGs (as the y coordinate starts at the top
        of the image and increases towards the bottom).
        """
        assert start != end
        assert radius.real != 0 and radius.imag != 0

        self.start = start
        # Per the SVG spec, negative radii have their sign dropped.
        self.radius = abs(radius.real) + 1j*abs(radius.imag)
        self.rotation = rotation
        self.large_arc = bool(large_arc)
        self.sweep = bool(sweep)
        self.end = end
        self.autoscale_radius = autoscale_radius

        # Convenience parameters
        self.phi = radians(self.rotation)
        self.rot_matrix = exp(1j*self.phi)

        # Derive derived parameters (self.theta, self.delta, self.center)
        self._parameterize()

    def __repr__(self):
        params = (self.start, self.radius, self.rotation,
                  self.large_arc, self.sweep, self.end)
        return ("Arc(start={}, radius={}, rotation={}, "
                "large_arc={}, sweep={}, end={})".format(*params))

    def __eq__(self, other):
        if not isinstance(other, Arc):
            return NotImplemented
        return self.start == other.start and self.end == other.end \
            and self.radius == other.radius \
            and self.rotation == other.rotation \
            and self.large_arc == other.large_arc and self.sweep == other.sweep

    def __ne__(self, other):
        if not isinstance(other, Arc):
            return NotImplemented
        return not self == other

    def _parameterize(self):
        # See http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes
        # my notation roughly follows theirs
        rx = self.radius.real
        ry = self.radius.imag
        rx_sqd = rx*rx
        ry_sqd = ry*ry

        # Transform z-> z' = x' + 1j*y'
        # = self.rot_matrix**(-1)*(z - (end+start)/2)
        # coordinates.  This translates the ellipse so that the midpoint
        # between self.end and self.start lies on the origin and rotates
        # the ellipse so that the its axes align with the xy-coordinate axes.
        # Note:  This sends self.end to -self.start
        zp1 = (1/self.rot_matrix)*(self.start - self.end)/2
        x1p, y1p = zp1.real, zp1.imag
        x1p_sqd = x1p*x1p
        y1p_sqd = y1p*y1p

        # Correct out of range radii
        # Note: an ellipse going through start and end with radius and phi
        # exists if and only if radius_check is true
        radius_check = (x1p_sqd/rx_sqd) + (y1p_sqd/ry_sqd)
        if radius_check > 1:
            if self.autoscale_radius:
                # Scale both radii uniformly until the ellipse exists.
                rx *= sqrt(radius_check)
                ry *= sqrt(radius_check)
                self.radius = rx + 1j*ry
                rx_sqd = rx*rx
                ry_sqd = ry*ry
            else:
                raise ValueError("No such elliptic arc exists.")

        # Compute c'=(c_x', c_y'), the center of the ellipse in (x', y') coords
        # Noting that, in our new coord system, (x_2', y_2') = (-x_1', -x_2')
        # and our ellipse is cut out by of the plane by the algebraic equation
        # (x'-c_x')**2 / r_x**2 + (y'-c_y')**2 / r_y**2 = 1,
        # we can find c' by solving the system of two quadratics given by
        # plugging our transformed endpoints (x_1', y_1') and (x_2', y_2')
        tmp = rx_sqd*y1p_sqd + ry_sqd*x1p_sqd
        radicand = (rx_sqd*ry_sqd - tmp) / tmp
        try:
            radical = sqrt(radicand)
        except ValueError:
            # Numerical noise can make radicand slightly negative when the
            # endpoints are (nearly) antipodal; clamp to zero.
            radical = 0

        if self.large_arc == self.sweep:
            cp = -radical*(rx*y1p/ry - 1j*ry*x1p/rx)
        else:
            cp = radical*(rx*y1p/ry - 1j*ry*x1p/rx)

        # The center in (x,y) coordinates is easy to find knowing c'
        self.center = exp(1j*self.phi)*cp + (self.start + self.end)/2

        # Now we do a second transformation, from (x', y') to (u_x, u_y)
        # coordinates, which is a translation moving the center of the
        # ellipse to the origin and a dilation stretching the ellipse to be
        # the unit circle
        u1 = (x1p - cp.real)/rx + 1j*(y1p - cp.imag)/ry  # transformed start
        u2 = (-x1p - cp.real)/rx + 1j*(-y1p - cp.imag)/ry  # transformed end

        # Now compute theta and delta (we'll define them as we go)
        # delta is the angular distance of the arc (w.r.t the circle)
        # theta is the angle between the positive x'-axis and the start point
        # on the circle
        if u1.imag > 0:
            self.theta = degrees(acos(u1.real))
        elif u1.imag < 0:
            self.theta = -degrees(acos(u1.real))
        else:
            if u1.real > 0:  # start is on pos u_x axis
                self.theta = 0
            else:  # start is on neg u_x axis
                # Note: This behavior disagrees with behavior documented in
                # http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes
                # where theta is set to 0 in this case.
                self.theta = 180

        det_uv = u1.real*u2.imag - u1.imag*u2.real

        acosand = u1.real*u2.real + u1.imag*u2.imag
        # Clamp rounding noise so acos() does not raise.
        if acosand > 1 or acosand < -1:
            acosand = round(acosand)
        if det_uv > 0:
            self.delta = degrees(acos(acosand))
        elif det_uv < 0:
            self.delta = -degrees(acos(acosand))
        else:
            if u1.real*u2.real + u1.imag*u2.imag > 0:
                # u1 == u2
                self.delta = 0
            else:
                # u1 == -u2
                # Note: This behavior disagrees with behavior documented in
                # http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes
                # where delta is set to 0 in this case.
                self.delta = 180

        if not self.sweep and self.delta >= 0:
            self.delta -= 360
        elif self.large_arc and self.delta <= 0:
            self.delta += 360

    def point(self, t):
        """Return the point on the arc at parameter t in [0, 1]."""
        if t == 0:
            return self.start
        if t == 1:
            return self.end
        angle = radians(self.theta + t*self.delta)
        cosphi = self.rot_matrix.real
        sinphi = self.rot_matrix.imag
        rx = self.radius.real
        ry = self.radius.imag

        # z = self.rot_matrix*(rx*cos(angle) + 1j*ry*sin(angle)) + self.center
        x = rx*cosphi*cos(angle) - ry*sinphi*sin(angle) + self.center.real
        y = rx*sinphi*cos(angle) + ry*cosphi*sin(angle) + self.center.imag
        return complex(x, y)

    def centeriso(self, z):
        """This is an isometry that translates and rotates self so that it
        is centered on the origin and has its axes aligned with the xy axes."""
        return (1/self.rot_matrix)*(z - self.center)

    def icenteriso(self, zeta):
        """This is an isometry, the inverse of standardiso()."""
        return self.rot_matrix*zeta + self.center

    def u1transform(self, z):
        """This is an affine transformation (same as used in
        self._parameterize()) that sends self to the unit circle."""
        zeta = (1/self.rot_matrix)*(z - self.center)  # same as centeriso(z)
        x, y = real(zeta), imag(zeta)
        return x/self.radius.real + 1j*y/self.radius.imag

    def iu1transform(self, zeta):
        """This is an affine transformation, the inverse of
        self.u1transform()."""
        x = real(zeta)
        y = imag(zeta)
        # BUGFIX: the imaginary part must be rebuilt with a 1j factor.
        # The previous `x*rx + y*ry` produced a real number, collapsing
        # every point onto the (rotated) real axis, so this was not
        # actually the inverse of u1transform().
        z = x*self.radius.real + 1j*y*self.radius.imag
        return self.rot_matrix*z + self.center

    def length(self, t0=0, t1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH):
        """The length of an elliptical large_arc segment requires numerical
        integration, and in that case it's simpler to just do a geometric
        approximation, as for cubic bezier curves."""
        assert 0 <= t0 <= 1 and 0 <= t1 <= 1
        if _quad_available:
            return quad(lambda tau: abs(self.derivative(tau)), t0, t1,
                        epsabs=error, limit=1000)[0]
        else:
            return segment_length(self, t0, t1, self.point(t0), self.point(t1),
                                  error, min_depth, 0)

    def ilength(self, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS,
                error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH):
        """Returns a float, t, such that self.length(0, t) is approximately s.
        See the inv_arclength() docstring for more details."""
        return inv_arclength(self, s, s_tol=s_tol, maxits=maxits, error=error,
                             min_depth=min_depth)

    def joins_smoothly_with(self, previous, wrt_parameterization=False,
                            error=0):
        """Checks if this segment joins smoothly with previous segment.  By
        default, this only checks that this segment starts moving (at t=0) in
        the same direction (and from the same positive) as previous stopped
        moving (at t=1).  To check if the tangent magnitudes also match, set
        wrt_parameterization=True."""
        if wrt_parameterization:
            return self.start == previous.end and abs(
                self.derivative(0) - previous.derivative(1)) <= error
        else:
            return self.start == previous.end and abs(
                self.unit_tangent(0) - previous.unit_tangent(1)) <= error

    def derivative(self, t, n=1):
        """returns the nth derivative of the segment at t."""
        angle = radians(self.theta + t*self.delta)
        phi = radians(self.rotation)
        rx = self.radius.real
        ry = self.radius.imag
        k = (self.delta*2*pi/360)**n  # ((d/dt)angle)**n

        if n % 4 == 0 and n > 0:
            # BUGFIX: the chain-rule factor k was previously omitted from
            # this branch; every derivative order picks up one factor of
            # d(angle)/dt, exactly as in the other three branches.
            return k*(rx*cos(phi)*cos(angle) - ry*sin(phi)*sin(angle) + 1j*(
                rx*sin(phi)*cos(angle) + ry*cos(phi)*sin(angle)))
        elif n % 4 == 1:
            return k*(-rx*cos(phi)*sin(angle) - ry*sin(phi)*cos(angle) + 1j*(
                -rx*sin(phi)*sin(angle) + ry*cos(phi)*cos(angle)))
        elif n % 4 == 2:
            return k*(-rx*cos(phi)*cos(angle) + ry*sin(phi)*sin(angle) + 1j*(
                -rx*sin(phi)*cos(angle) - ry*cos(phi)*sin(angle)))
        elif n % 4 == 3:
            return k*(rx*cos(phi)*sin(angle) + ry*sin(phi)*cos(angle) + 1j*(
                rx*sin(phi)*sin(angle) - ry*cos(phi)*cos(angle)))
        else:
            raise ValueError("n should be a positive integer.")

    def unit_tangent(self, t):
        """returns the unit tangent vector of the segment at t (centered at
        the origin and expressed as a complex number)."""
        dseg = self.derivative(t)
        return dseg/abs(dseg)

    def normal(self, t):
        """returns the (right hand rule) unit normal vector to self at t."""
        return -1j*self.unit_tangent(t)

    def curvature(self, t):
        """returns the curvature of the segment at t."""
        return segment_curvature(self, t)

    # def icurvature(self, kappa):
    #     """returns a list of t-values such that 0 <= t<= 1 and
    #     seg.curvature(t) = kappa."""
    #
    #     a, b = self.radius.real, self.radius.imag
    #     if kappa > min(a, b)/max(a, b)**2 or kappa <= 0:
    #         return []
    #     if a==b:
    #         if kappa != 1/a:
    #             return []
    #         else:
    #             raise ValueError(
    #                 "The .icurvature() method for Arc elements with "
    #                 "radius.real == radius.imag (i.e. circle segments) "
    #                 "will raise this exception when kappa is 1/radius.real as "
    #                 "this is true at every point on the circle segment.")
    #
    #     # kappa = a*b / (a^2sin^2(tau) + b^2cos^2(tau))^(3/2), tau=2*pi*phase
    #     sin2 = np.poly1d([1, 0])
    #     p = kappa**2*(a*sin2 + b*(1 - sin2))**3 - a*b
    #     sin2s = polyroots01(p)
    #     taus = []
    #
    #     for sin2 in sin2s:
    #         taus += [np.arcsin(sqrt(sin2)), np.arcsin(-sqrt(sin2))]
    #
    #     # account for the other branch of arcsin
    #     sgn = lambda x: x/abs(x) if x else 0
    #     other_taus = [sgn(tau)*np.pi - tau for tau in taus if abs(tau) != np.pi/2]
    #     taus = taus + other_taus
    #
    #     # get rid of points not included in segment
    #     ts = [phase2t(tau) for tau in taus]
    #
    #     return [t for t in ts if 0<=t<=1]

    def reversed(self):
        """returns a copy of the Arc object with its orientation reversed."""
        return Arc(self.end, self.radius, self.rotation, self.large_arc,
                   not self.sweep, self.start)

    def phase2t(self, psi):
        """Given phase -pi < psi <= pi,
        returns the t value such that
        exp(1j*psi) = self.u1transform(self.point(t)).
        """
        def _deg(rads, domain_lower_limit):
            # Convert rads to degrees in [0, 360) domain
            degs = degrees(rads % (2*pi))

            # Convert to [domain_lower_limit, domain_lower_limit + 360) domain
            k = domain_lower_limit // 360
            degs += k * 360
            if degs < domain_lower_limit:
                degs += 360
            return degs

        # Note: the same domain shift applies whether the arc runs CCW
        # (delta > 0) or CW (delta < 0); the two original branches of the
        # `if self.delta > 0` test were identical, so they were collapsed.
        degs = _deg(psi, domain_lower_limit=self.theta)
        return (degs - self.theta)/self.delta

    def intersect(self, other_seg, tol=1e-12):
        """NOT FULLY IMPLEMENTED.  Finds the intersections of two segments.
        returns a list of tuples (t1, t2) such that
        self.point(t1) == other_seg.point(t2).
        Note: This will fail if the two segments coincide for more than a
        finite collection of points.
        Note: Arc related intersections are only partially supported, i.e. are
        only half-heartedly implemented and not well tested.  Please feel free
        to let me know if you're interested in such a feature -- or even better
        please submit an implementation if you want to code one."""

        if is_bezier_segment(other_seg):
            # Send self to the unit circle; the other segment's image
            # meets the unit circle exactly at the intersection points.
            u1poly = self.u1transform(other_seg.poly())
            u1poly_mag2 = real(u1poly)**2 + imag(u1poly)**2
            t2s = polyroots01(u1poly_mag2 - 1)
            t1s = [self.phase2t(phase(u1poly(t2))) for t2 in t2s]
            return list(zip(t1s, t2s))
        elif isinstance(other_seg, Arc):
            assert other_seg != self
            # This could be made explicit to increase efficiency
            longer_length = max(self.length(), other_seg.length())
            inters = bezier_intersections(self, other_seg,
                                          longer_length=longer_length,
                                          tol=tol, tol_deC=tol)

            # ad hoc fix for redundant solutions
            if len(inters) > 2:
                def keyfcn(tpair):
                    t1, t2 = tpair
                    return abs(self.point(t1) - other_seg.point(t2))
                inters.sort(key=keyfcn)
                for idx in range(1, len(inters)-1):
                    if (abs(inters[idx][0] - inters[idx + 1][0])
                            < abs(inters[idx][0] - inters[0][0])):
                        return [inters[0], inters[idx]]
                else:
                    return [inters[0], inters[-1]]
            return inters
        else:
            raise TypeError("other_seg should be a Arc, Line, "
                            "QuadraticBezier, or CubicBezier object.")

    def bbox(self):
        """returns a bounding box for the segment in the form
        (xmin, xmax, ymin, ymax)."""
        # a(t) = radians(self.theta + self.delta*t)
        #      = (2*pi/360)*(self.theta + self.delta*t)
        # x'=0: ~~~~~~~~~
        # -rx*cos(phi)*sin(a(t)) = ry*sin(phi)*cos(a(t))
        # -(rx/ry)*cot(phi)*tan(a(t)) = 1
        # a(t) = arctan(-(ry/rx)tan(phi)) + pi*k === atan_x
        # y'=0: ~~~~~~~~~~
        # rx*sin(phi)*sin(a(t)) = ry*cos(phi)*cos(a(t))
        # (rx/ry)*tan(phi)*tan(a(t)) = 1
        # a(t) = arctan((ry/rx)*cot(phi))
        # atanres = arctan((ry/rx)*cot(phi)) === atan_y
        # ~~~~~~~~
        # (2*pi/360)*(self.theta + self.delta*t) = atanres + pi*k
        # Therfore, for both x' and y', we have...
        # t = ((atan_{x/y} + pi*k)*(360/(2*pi)) - self.theta)/self.delta
        # for all k s.t. 0 < t < 1
        from math import atan, tan

        if cos(self.phi) == 0:
            atan_x = pi/2
            atan_y = 0
        elif sin(self.phi) == 0:
            atan_x = 0
            atan_y = pi/2
        else:
            rx, ry = self.radius.real, self.radius.imag
            atan_x = atan(-(ry/rx)*tan(self.phi))
            atan_y = atan((ry/rx)/tan(self.phi))

        def angle_inv(ang, k):  # inverse of angle from Arc.derivative()
            return ((ang + pi*k)*(360/(2*pi)) - self.theta)/self.delta

        xtrema = [self.start.real, self.end.real]
        ytrema = [self.start.imag, self.end.imag]
        for k in range(-4, 5):
            tx = angle_inv(atan_x, k)
            ty = angle_inv(atan_y, k)
            if 0 <= tx <= 1:
                xtrema.append(self.point(tx).real)
            if 0 <= ty <= 1:
                ytrema.append(self.point(ty).imag)
        # BUGFIX: removed dead assignment `xmin = max(xtrema)` -- the
        # variable was never used (and was mis-named to boot).
        return min(xtrema), max(xtrema), min(ytrema), max(ytrema)

    def split(self, t):
        """returns two segments, whose union is this segment and which join
        at self.point(t)."""
        return self.cropped(0, t), self.cropped(t, 1)

    def cropped(self, t0, t1):
        """returns a cropped copy of this segment which starts at
        self.point(t0) and ends at self.point(t1)."""
        if abs(self.delta*(t1 - t0)) <= 180:
            new_large_arc = 0
        else:
            new_large_arc = 1
        return Arc(self.point(t0), radius=self.radius, rotation=self.rotation,
                   large_arc=new_large_arc, sweep=self.sweep,
                   end=self.point(t1), autoscale_radius=self.autoscale_radius)

    def radialrange(self, origin, return_all_global_extrema=False):
        """returns the tuples (d_min, t_min) and (d_max, t_max) which minimize
        and maximize, respectively, the distance,
        d = |self.point(t)-origin|.

        NOTE(review): this method is unfinished and unconditionally raises
        _NotImplemented4ArcException at the end; the computations above the
        raise are preparatory work that is never returned."""
        u1orig = self.u1transform(origin)

        if abs(u1orig) == 1:  # origin lies on ellipse
            t = self.phase2t(phase(u1orig))
            d_min = 0

        # Transform to a coordinate system where the ellipse is centered
        # at the origin and its axes are horizontal/vertical
        zeta0 = self.centeriso(origin)
        a, b = self.radius.real, self.radius.imag
        x0, y0 = zeta0.real, zeta0.imag

        # Find t s.t. z'(t)
        a2mb2 = (a**2 - b**2)
        if u1orig.imag:  # x != x0
            coeffs = [a2mb2**2,
                      2*a2mb2*b**2*y0,
                      (-a**4 + (2*a**2 - b**2 + y0**2)*b**2 + x0**2)*b**2,
                      -2*a2mb2*b**4*y0,
                      -b**6*y0**2]
            ys = polyroots(coeffs, realroots=True,
                           condition=lambda r: -b <= r <= b)
            xs = (a*sqrt(1 - y**2/b**2) for y in ys)

            ts = [self.phase2t(phase(self.u1transform(self.icenteriso(
                complex(x, y))))) for x, y in zip(xs, ys)]

        else:  # This case is very similar, see notes and assume instead y0!=y
            b2ma2 = (b**2 - a**2)
            coeffs = [b2ma2**2,
                      2*b2ma2*a**2*x0,
                      (-b**4 + (2*b**2 - a**2 + x0**2)*a**2 + y0**2)*a**2,
                      -2*b2ma2*a**4*x0,
                      -a**6*x0**2]
            xs = polyroots(coeffs, realroots=True,
                           condition=lambda r: -a <= r <= a)
            ys = (b*sqrt(1 - x**2/a**2) for x in xs)

            ts = [self.phase2t(phase(self.u1transform(self.icenteriso(
                complex(x, y))))) for x, y in zip(xs, ys)]

        raise _NotImplemented4ArcException

    def rotated(self, degs, origin=None):
        """Returns a copy of self rotated by `degs` degrees (CCW) around the
        point `origin` (a complex number).  By default `origin` is either
        `self.point(0.5)`, or in the case that self is an Arc object,
        `origin` defaults to `self.center`."""
        return rotate(self, degs, origin=origin)

    def translated(self, z0):
        """Returns a copy of self shifted by the complex quantity `z0` such
        that self.translated(z0).point(t) = self.point(t) + z0 for any t."""
        return translate(self, z0)
def is_bezier_segment(x):
    """True iff x is a (non-Arc) Bezier path segment."""
    return isinstance(x, (Line, QuadraticBezier, CubicBezier))
def is_path_segment(x):
    """True iff x is any supported path segment type (Bezier or Arc)."""
    return isinstance(x, Arc) or is_bezier_segment(x)
class Path(MutableSequence):
"""A Path is a sequence of path segments"""
# Put it here, so there is a default if unpickled.
_closed = False
_start = None
_end = None
def __init__(self, *segments, **kw):
self._segments = list(segments)
self._length = None
self._lengths = None
if 'closed' in kw:
self.closed = kw['closed'] # DEPRECATED
if self._segments:
self._start = self._segments[0].start
self._end = self._segments[-1].end
else:
self._start = None
self._end = None
    def __getitem__(self, index):
        # Supports integer indexing and slicing (delegates to the list).
        return self._segments[index]
    def __setitem__(self, index, value):
        # Replace a segment, invalidate the cached total length, and
        # refresh the cached endpoints.
        self._segments[index] = value
        self._length = None
        self._start = self._segments[0].start
        self._end = self._segments[-1].end
def __delitem__(self, index):
del self._segments[index]
self._length = None
self._start = self._segments[0].start
self._end = self._segments[-1].end
    def __iter__(self):
        # Iterate directly over the underlying segment list.
        return self._segments.__iter__()
    def __contains__(self, x):
        # Membership test delegates to the underlying segment list.
        return self._segments.__contains__(x)
def insert(self, index, value):
self._segments.insert(index, value)
self._length = None
self._start = self._segments[0].start
self._end = self._segments[-1].end
def reversed(self):
"""returns a copy of the Path object with its orientation reversed."""
newpath = [seg.reversed() for seg in self]
newpath.reverse()
return Path(*newpath)
    def __len__(self):
        # Number of segments in the path.
        return len(self._segments)
    def __repr__(self):
        # One segment repr per line, indented to align under "Path(".
        return "Path({})".format(
            ",\n    ".join(repr(x) for x in self._segments))
def __eq__(self, other):
if not isinstance(other, Path):
return NotImplemented
if len(self) != len(other):
return False
for s, o in zip(self._segments, other._segments):
if not s == o:
return False
return True
    def __ne__(self, other):
        # Defined explicitly for symmetry with __eq__ (Python 2 heritage).
        if not isinstance(other, Path):
            return NotImplemented
        return not self == other
    def _calc_lengths(self, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH):
        # Populate the length caches: self._length (total path length) and
        # self._lengths (each segment's share as a fraction of the total).
        # No-op if already computed.
        if self._length is not None:
            return
        lengths = [each.length(error=error, min_depth=min_depth) for each in
                   self._segments]
        self._length = sum(lengths)
        # NOTE(review): a path of zero total length would raise
        # ZeroDivisionError here.
        self._lengths = [each/self._length for each in lengths]
def point(self, pos):
# Shortcuts
if pos == 0.0:
return self._segments[0].point(pos)
if pos == 1.0:
return self._segments[-1].point(pos)
self._calc_lengths()
# Find which segment the point we search for is located on:
segment_start = 0
for index, segment in enumerate(self._segments):
segment_end = segment_start + self._lengths[index]
if segment_end >= pos:
# This is the segment! How far in on the segment is the point?
segment_pos = (pos - segment_start)/(
segment_end - segment_start)
return segment.point(segment_pos)
segment_start = segment_end
    def length(self, T0=0, T1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH):
        """Returns the arc length of the sub-path from path parameter T0
        to T1 (the whole path by default)."""
        self._calc_lengths(error=error, min_depth=min_depth)
        if T0 == 0 and T1 == 1:
            # Whole-path shortcut: the cached total.
            return self._length
        else:
            if len(self) == 1:
                return self[0].length(t0=T0, t1=T1)
            # Sum: partial first segment + full middle segments +
            # partial last segment.
            idx0, t0 = self.T2t(T0)
            idx1, t1 = self.T2t(T1)
            if idx0 == idx1:
                return self[idx0].length(t0=t0, t1=t1)
            return (self[idx0].length(t0=t0) +
                    sum(self[idx].length() for idx in range(idx0 + 1, idx1)) +
                    self[idx1].length(t1=t1))
    def ilength(self, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS,
                error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH):
        """Returns a float, t, such that self.length(0, t) is approximately s.
        See the inv_arclength() docstring for more details."""
        # Pure delegation to the shared inverse-arclength solver.
        return inv_arclength(self, s, s_tol=s_tol, maxits=maxits, error=error,
                             min_depth=min_depth)
def iscontinuous(self):
"""Checks if a path is continuous with respect to its
parameterization."""
return all(self[i].end == self[i+1].start for i in range(len(self) - 1))
def continuous_subpaths(self):
"""Breaks self into its continuous components, returning a list of
continuous subpaths.
I.e.
(all(subpath.iscontinuous() for subpath in self.continuous_subpaths())
and self == concatpaths(self.continuous_subpaths()))
)
"""
subpaths = []
subpath_start = 0
for i in range(len(self) - 1):
if self[i].end != self[(i+1) % len(self)].start:
subpaths.append(Path(*self[subpath_start: i+1]))
subpath_start = i+1
subpaths.append(Path(*self[subpath_start: len(self)]))
return subpaths
    def isclosed(self):
        """This function determines if a connected path is closed."""
        # NOTE(review): uses assert for validation; these checks vanish
        # under `python -O`.
        assert len(self) != 0
        assert self.iscontinuous()
        return self.start == self.end
    def isclosedac(self):
        # Looser variant of isclosed(): only checks that the endpoints
        # coincide, without requiring the path to be continuous.
        assert len(self) != 0
        return self.start == self.end
    def _is_closable(self):
        # True iff some segment starts exactly where the final segment
        # ends, i.e. the path's end coincides with a segment start.
        end = self[-1].end
        for segment in self:
            if segment.start == end:
                return True
        return False
    @property
    def closed(self, warning_on=CLOSED_WARNING_ON):
        """The closed attribute is deprecated, please use the isclosed()
        method instead.  See _closed_warning for more information.

        NOTE(review): because this is a property getter, the extra
        `warning_on` parameter can never be supplied by a caller --
        attribute access always uses the default.  Code of the form
        `self.closed(warning_on=False)` would attempt to call the
        returned bool and raise TypeError."""
        mes = ("This attribute is deprecated, consider using isclosed() "
               "method instead.\n\nThis attribute is kept for compatibility "
               "with scripts created using svg.path (v2.0). You can prevent "
               "this warning in the future by setting "
               "CLOSED_WARNING_ON=False.")
        if warning_on:
            warn(mes)
        return self._closed and self._is_closable()
    @closed.setter
    def closed(self, value):
        # Only allow marking the path closed when its end actually
        # coincides with some segment's start.
        value = bool(value)
        if value and not self._is_closable():
            raise ValueError("End does not coincide with a segment start.")
        self._closed = value
@property
def start(self):
if not self._start:
self._start = self._segments[0].start
return self._start
    @start.setter
    def start(self, pt):
        # Keep the cache and the first segment's start point in sync.
        self._start = pt
        self._segments[0].start = pt
@property
def end(self):
if not self._end:
self._end = self._segments[-1].end
return self._end
    @end.setter
    def end(self, pt):
        # Keep the cache and the last segment's end point in sync.
        self._end = pt
        self._segments[-1].end = pt
def d(self, useSandT=False, use_closed_attrib=False):
"""Returns a path d-string for the path object.
For an explanation of useSandT and use_closed_attrib, see the
compatibility notes in the README."""
if use_closed_attrib:
self_closed = self.closed(warning_on=False)
if self_closed:
segments = self[:-1]
else:
segments = self[:]
else:
self_closed = False
segments = self[:]
current_pos = None
parts = []
previous_segment = None
end = self[-1].end
for segment in segments:
seg_start = segment.start
# If the start of this segment does not coincide with the end of
# the last segment or if this segment is actually the close point
# of a closed path, then we should start a new subpath here.
if current_pos != seg_start or \
(self_closed and seg_start == end and use_closed_attrib):
parts.append('M {},{}'.format(seg_start.real, seg_start.imag))
if isinstance(segment, Line):
args = segment.end.real, segment.end.imag
parts.append('L {},{}'.format(*args))
elif isinstance(segment, CubicBezier):
if useSandT and segment.is_smooth_from(previous_segment,
warning_on=False):
args = (segment.control2.real, segment.control2.imag,
segment.end.real, segment.end.imag)
parts.append('S {},{} {},{}'.format(*args))
else:
args = (segment.control1.real, segment.control1.imag,
segment.control2.real, segment.control2.imag,
segment.end.real, segment.end.imag)
parts.append('C {},{} {},{} {},{}'.format(*args))
elif isinstance(segment, QuadraticBezier):
if useSandT and segment.is_smooth_from(previous_segment,
warning_on=False):
args = segment.end.real, segment.end.imag
parts.append('T {},{}'.format(*args))
else:
args = (segment.control.real, segment.control.imag,
segment.end.real, segment.end.imag)
parts.append('Q {},{} {},{}'.format(*args))
elif isinstance(segment, Arc):
args = (segment.radius.real, segment.radius.imag,
segment.rotation,int(segment.large_arc),
int(segment.sweep),segment.end.real, segment.end.imag)
parts.append('A {},{} {} {:d},{:d} {},{}'.format(*args))
current_pos = segment.end
previous_segment = segment
if self_closed:
parts.append('Z')
return ' '.join(parts)
    def joins_smoothly_with(self, previous, wrt_parameterization=False):
        """Checks if this Path object joins smoothly with previous
        path/segment.  By default, this only checks that this Path starts
        moving (at t=0) in the same direction (and from the same positive) as
        previous stopped moving (at t=1).  To check if the tangent magnitudes
        also match, set wrt_parameterization=True."""
        if wrt_parameterization:
            # Exact derivative match (direction and magnitude).
            return self[0].start == previous.end and self.derivative(
                0) == previous.derivative(1)
        else:
            # Direction-only match via unit tangents.
            return self[0].start == previous.end and self.unit_tangent(
                0) == previous.unit_tangent(1)
def T2t(self, T):
"""returns the segment index, seg_idx, and segment parameter, t,
corresponding to the path parameter T. In other words, this is the
inverse of the Path.t2T() method."""
if T == 1:
return len(self)-1, 1
if T == 0:
return 0, 0
self._calc_lengths()
# Find which segment self.point(T) falls on:
T0 = 0 # the T-value the current segment starts on
for seg_idx, seg_length in enumerate(self._lengths):
T1 = T0 + seg_length # the T-value the current segment ends on
if T1 >= T:
# This is the segment!
t = (T - T0)/seg_length
return seg_idx, t
T0 = T1
assert 0 <= T <= 1
raise BugException
    def t2T(self, seg, t):
        """returns the path parameter T which corresponds to the segment
        parameter t.  In other words, for any Path object, path, and any
        segment in path, seg,  T(t) = path.t2T(seg, t) is the unique
        reparameterization such that path.point(T(t)) == seg.point(t) for all
        0 <= t <= 1.
        Input Note: seg can be a segment in the Path object or its
        corresponding index."""
        self._calc_lengths()
        # Accept an index or a segment for seg
        if isinstance(seg, int):
            seg_idx = seg
        else:
            try:
                seg_idx = self.index(seg)
            except ValueError:
                assert is_path_segment(seg) or isinstance(seg, int)
                raise
        # Linear interpolation over the segment's length-fraction span.
        segment_start = sum(self._lengths[:seg_idx])
        segment_end = segment_start + self._lengths[seg_idx]
        T = (segment_end - segment_start)*t + segment_start
        return T
    def derivative(self, T, n=1):
        """returns the tangent vector of the Path at T (centered at the origin
        and expressed as a complex number).
        Note: Bezier curves can have points where their derivative vanishes.
        If you are interested in the tangent direction, use unit_tangent()
        method instead."""
        seg_idx, t = self.T2t(T)
        seg = self._segments[seg_idx]
        # Chain rule: each derivative order is rescaled by 1/seg.length().
        # NOTE(review): this assumes dT/dt == seg.length() (length-weighted
        # parameterization, matching T2t) -- confirm against _calc_lengths,
        # which normalizes by the total path length.
        return seg.derivative(t, n=n)/seg.length()**n
    def unit_tangent(self, T):
        """returns the unit tangent vector of the Path at T (centered at the
        origin and expressed as a complex number).  If the tangent vector's
        magnitude is zero, this method will find the limit of
        self.derivative(tau)/abs(self.derivative(tau)) as tau approaches T."""
        # Delegate to the owning segment's unit_tangent.
        seg_idx, t = self.T2t(T)
        return self._segments[seg_idx].unit_tangent(t)
def normal(self, t):
"""returns the (right hand rule) unit normal vector to self at t."""
return -1j*self.unit_tangent(t)
def curvature(self, T):
"""returns the curvature of this Path object at T and outputs
float('inf') if not differentiable at T."""
seg_idx, t = self.T2t(T)
seg = self[seg_idx]
if np.isclose(t, 0) and (seg_idx != 0 or self.end==self.start):
previous_seg_in_path = self._segments[
(seg_idx - 1) % len(self._segments)]
if not seg.joins_smoothl_with(previous_seg_in_path):
return float('inf')
elif np.isclose(t, 1) and (seg_idx != len(self) - 1 or self.end==self.start):
next_seg_in_path = self._segments[
(seg_idx + 1) % len(self._segments)]
if not next_seg_in_path.joins_smoothly_with(seg):
return float('inf')
dz = self.derivative(t)
ddz = self.derivative(t, n=2)
dx, dy = dz.real, dz.imag
ddx, ddy = ddz.real, ddz.imag
return abs(dx*ddy - dy*ddx)/(dx*dx + dy*dy)**1.5
# def icurvature(self, kappa):
# """returns a list of T-values such that 0 <= T <= 1 and
# seg.curvature(t) = kappa.
# Note: not implemented for paths containing Arc segments."""
# assert is_bezier_path(self)
# Ts = []
# for i, seg in enumerate(self):
# Ts += [self.t2T(i, t) for t in seg.icurvature(kappa)]
# return Ts
def area(self):
"""returns the area enclosed by this Path object.
Note: negative area results from CW (as opposed to CCW)
parameterization of the Path object."""
assert self.isclosed()
area_enclosed = 0
for seg in self:
x = real(seg.poly())
dy = imag(seg.poly()).deriv()
integrand = x*dy
integral = integrand.integ()
area_enclosed += integral(1) - integral(0)
return area_enclosed
def intersect(self, other_curve, justonemode=False, tol=1e-12):
"""returns list of pairs of pairs ((T1, seg1, t1), (T2, seg2, t2))
giving the intersection points.
If justonemode==True, then returns just the first
intersection found.
tol is used to check for redundant intersections (see comment above
the code block where tol is used).
Note: If the two path objects coincide for more than a finite set of
points, this code will fail."""
path1 = self
if isinstance(other_curve, Path):
path2 = other_curve
else:
path2 = Path(other_curve)
assert path1 != path2
intersection_list = []
for seg1 in path1:
for seg2 in path2:
if justonemode and intersection_list:
return intersection_list[0]
for t1, t2 in seg1.intersect(seg2, tol=tol):
T1 = path1.t2T(seg1, t1)
T2 = path2.t2T(seg2, t2)
intersection_list.append(((T1, seg1, t1), (T2, seg2, t2)))
if justonemode and intersection_list:
return intersection_list[0]
# Note: If the intersection takes place at a joint (point one seg ends
# and next begins in path) then intersection_list may contain a
# redundant intersection. This code block checks for and removes said
# redundancies.
if intersection_list:
pts = [seg1.point(_t1) for _T1, _seg1, _t1 in list(zip(*intersection_list))[0]]
indices2remove = []
for ind1 in range(len(pts)):
for ind2 in range(ind1 + 1, len(pts)):
if abs(pts[ind1] - pts[ind2]) < tol:
# then there's a redundancy. Remove it.
indices2remove.append(ind2)
intersection_list = [inter for ind, inter in
enumerate(intersection_list) if
ind not in indices2remove]
return intersection_list
def bbox(self):
"""returns a bounding box for the input Path object in the form
(xmin, xmax, ymin, ymax)."""
bbs = [seg.bbox() for seg in self._segments]
xmins, xmaxs, ymins, ymaxs = list(zip(*bbs))
xmin = min(xmins)
xmax = max(xmaxs)
ymin = min(ymins)
ymax = max(ymaxs)
return xmin, xmax, ymin, ymax
    def cropped(self, T0, T1):
        """returns a cropped copy of the path.

        T0 and T1 are path parameters in [0, 1].  If T1 < T0, the path
        must be closed and the crop wraps through the start/end seam."""
        assert 0 <= T0 <= 1 and 0 <= T1<= 1
        assert T0 != T1
        assert not (T0 == 1 and T1 == 0)
        # On a closed path T0 == 1 refers to the same point as T0 == 0.
        if T0 == 1 and 0 < T1 < 1 and self.isclosed():
            return self.cropped(0, T1)
        # Find the last segment of the crop (seg1) and the local parameter
        # t_seg1 at which the crop ends on it.
        if T1 == 1:
            seg1 = self[-1]
            t_seg1 = 1
            i1 = len(self) - 1
        else:
            seg1_idx, t_seg1 = self.T2t(T1)
            seg1 = self[seg1_idx]
            # A crop ending at a segment's start is the same point as the
            # previous segment's end; normalize to the previous segment.
            if np.isclose(t_seg1, 0):
                i1 = (self.index(seg1) - 1) % len(self)
                seg1 = self[i1]
                t_seg1 = 1
            else:
                i1 = self.index(seg1)
        # Find the first segment of the crop (seg0) analogously.
        if T0 == 0:
            seg0 = self[0]
            t_seg0 = 0
            i0 = 0
        else:
            seg0_idx, t_seg0 = self.T2t(T0)
            seg0 = self[seg0_idx]
            if np.isclose(t_seg0, 1):
                i0 = (self.index(seg0) + 1) % len(self)
                seg0 = self[i0]
                t_seg0 = 0
            else:
                i0 = self.index(seg0)
        if T0 < T1 and i0 == i1:
            # Crop begins and ends within a single segment.
            new_path = Path(seg0.cropped(t_seg0, t_seg1))
        else:
            new_path = Path(seg0.cropped(t_seg0, 1))
            # T1<T0 must cross discontinuity case
            if T1 < T0:
                if not self.isclosed():
                    raise ValueError("This path is not closed, thus T0 must "
                                     "be less than T1.")
                else:
                    # Wrap around the seam: tail of the path, then its head.
                    for i in range(i0 + 1, len(self)):
                        new_path.append(self[i])
                    for i in range(0, i1):
                        new_path.append(self[i])
            # T0<T1 straight-forward case
            else:
                for i in range(i0 + 1, i1):
                    new_path.append(self[i])
            if t_seg1 != 0:
                new_path.append(seg1.cropped(0, t_seg1))
        return new_path
def radialrange(self, origin, return_all_global_extrema=False):
"""returns the tuples (d_min, t_min, idx_min), (d_max, t_max, idx_max)
which minimize and maximize, respectively, the distance
d = |self[idx].point(t)-origin|."""
if return_all_global_extrema:
raise NotImplementedError
else:
global_min = (np.inf, None, None)
global_max = (0, None, None)
for seg_idx, seg in enumerate(self):
seg_global_min, seg_global_max = seg.radialrange(origin)
if seg_global_min[0] < global_min[0]:
global_min = seg_global_min + (seg_idx,)
if seg_global_max[0] > global_max[0]:
global_max = seg_global_max + (seg_idx,)
return global_min, global_max
    def rotated(self, degs, origin=None):
        """Returns a copy of self rotated by `degs` degrees (CCW) around the
        point `origin` (a complex number). By default `origin` is either
        `self.point(0.5)`, or in the case that self is an Arc object,
        `origin` defaults to `self.center`.

        NOTE(review): the default-origin handling actually lives in the
        module-level `rotate()` helper; the Arc branch mentioned above
        cannot apply to a Path instance — docstring presumably shared with
        the segment classes, confirm against `rotate()`.
        """
        return rotate(self, degs, origin=origin)
    def translated(self, z0):
        """Returns a copy of self shifted by the complex quantity `z0` such
        that self.translated(z0).point(t) = self.point(t) + z0 for any t.

        Delegates to the module-level `translate()` helper."""
        return translate(self, z0)
| 40.486173 | 115 | 0.574606 |
d91fb4e83f54a6a94862692ba476fc7bbe3c91f3 | 622 | py | Python | app/tests/test_saveNewsDetail.py | xieningtao/maxleap_server | 6be5be1b4fccef72df85fd230a8a5c9210fb6d27 | [
"CC0-1.0"
] | null | null | null | app/tests/test_saveNewsDetail.py | xieningtao/maxleap_server | 6be5be1b4fccef72df85fd230a8a5c9210fb6d27 | [
"CC0-1.0"
] | null | null | null | app/tests/test_saveNewsDetail.py | xieningtao/maxleap_server | 6be5be1b4fccef72df85fd230a8a5c9210fb6d27 | [
"CC0-1.0"
] | null | null | null | import ML
import function.saveNewsDetail
import hook.hooks
import job
import json
from nose.tools import with_setup
from ML import Server
def setup_func():
    # Initialize the MaxLeap (ML) SDK before the test runs; registered
    # below via nose's @with_setup hook.
    # NOTE(review): the app id and master key are hard-coded credentials —
    # consider moving them out of source control.
    ML.init(
        "57f9edc887d4a7e337b8c231",
        master_key="elhmazJfd29ZTFBhR0M3SmJ0R2N6UQ",
    )
@with_setup(setup_func)
def test_saveNewsDetail():
    # Read a local JSON fixture and fall back to an empty payload when it
    # cannot be read, then post it to the 'saveNewsDetail' cloud function.
    # NOTE(review): Python 2 syntax (print statement, bare except) and an
    # absolute user-specific path — this test is not portable as written.
    fileName = "/Users/mac/Downloads/newsDetail.json"
    fileObject = open(fileName)
    try:
        newsDetail = fileObject.read()
    except:
        newsDetail = ""
    # Call the server-side function with the fixture wrapped as {"content": ...}.
    response = Server.callFunction('saveNewsDetail', data=json.dumps({"content":newsDetail}))
    print "response: "+response.data | 24.88 | 93 | 0.709003 |
273aa8d6db1cdd86bdd22205cd92f05f45defdd0 | 2,303 | py | Python | src/testcase/GN_Y201J/case/GN_Y201J_ELECTRICITY_METER/GN_Y201J_ELECTRICITY_METER_002.py | maiyajj/AutoTest_script-Appium_Connect | f9c2c42c281a9e2f984acb4a72dda0694b053f22 | [
"Apache-2.0"
] | 28 | 2017-11-10T00:19:16.000Z | 2022-02-19T16:42:05.000Z | src/testcase/GN_Y201J/case/GN_Y201J_ELECTRICITY_METER/GN_Y201J_ELECTRICITY_METER_002.py | maiyajj/AutoTest_script-Appium_Connect | f9c2c42c281a9e2f984acb4a72dda0694b053f22 | [
"Apache-2.0"
] | null | null | null | src/testcase/GN_Y201J/case/GN_Y201J_ELECTRICITY_METER/GN_Y201J_ELECTRICITY_METER_002.py | maiyajj/AutoTest_script-Appium_Connect | f9c2c42c281a9e2f984acb4a72dda0694b053f22 | [
"Apache-2.0"
] | 23 | 2017-08-22T06:12:19.000Z | 2021-09-18T05:45:41.000Z | # coding=utf-8
from src.testcase.GN_Y201J.WidgetOperation import *
class GNY201JElectricityMeter2(WidgetOperation):
    # UI test case: verify billing under a single (flat-rate) electricity tariff.
    @case_run(False)
    def run(self):
        self.case_module = u"电量计量"  # module this case belongs to ("electricity metering")
        self.case_title = u'单一电价验证'  # case title ("single-tariff verification")
        self.zentao_id = "1138"  # ZenTao (issue tracker) id
    # Test case actions
    def case(self):
        # Select the target device on the home screen and power it on.
        self.choose_home_device(conf["MAC"]["JD"][0])
        self.set_power("power_on")
        self.ac.swipe(0.5, 0.9, 0.5, 0.7, self.driver)
        # Navigate: control page -> tariff settings -> single-tariff price input.
        self.widget_click(self.page["control_device_page"]["set_elec"],
                          self.page["set_elec_page"]["title"])
        self.widget_click(self.page["set_elec_page"]["single_button"],
                          self.page["set_elec_page"]["title"])
        self.widget_click(self.page["set_elec_page"]["single_price"],
                          self.page["single_price_page"]["title"])
        elec_price = self.widget_click(self.page["single_price_page"]["set_price"],
                                       self.page["single_price_page"]["title"])
        # Enter a flat price of 5 per unit of energy.
        elec_price_data = "5"
        elec_price.clear()
        self.ac.send_keys(elec_price, elec_price_data, self.driver)
        self.debug.info(u'[APP_INPUT] ["单一电价"] input success')
        time.sleep(0.5)
        # Return to the device control page.
        self.widget_click(self.page["single_price_page"]["to_return"],
                          self.page["set_elec_page"]["title"])
        self.widget_click(self.page["set_elec_page"]["to_return"],
                          self.page["control_device_page"]["title"])
        # Confirm the UI now reports single-tariff mode.
        attribute = self.ac.get_attribute(self.wait_widget(self.page["control_device_page"]["set_elec"]), "name")
        if u"单一电价" not in attribute:
            raise TimeoutException("set signal price is wrong, current mode is %s" % [attribute])
        # self.ac.swipe(0.5, 0.7, 0.5, 0.9, self.driver)
        # Verify the computed bill equals energy * price over the sampled hours.
        now_h = int(time.strftime("%H"))
        elec, elec_bill = self.get_device_elect(now_h + 2, True)
        elec_bill_info = ("elec bill is wrong, current [elec_bill: %s, elec: %s, elec_price: %s]"
                          % (sum(elec_bill.values()), sum(elec.values()), elec_price_data))
        self.debug.info(elec_bill_info)
        if sum(elec_bill.values()) != sum(elec.values()) * int(elec_price_data):
            raise TimeoutException(elec_bill_info)
| 39.033898 | 113 | 0.598784 |
c2bf2823dc89e35cb491926c69e4bcbc690e84ed | 6,891 | py | Python | megatron/microbatches.py | adammoody/Megatron-DeepSpeed | 972211163608818fe9e5ba821246f18d0a5dc264 | [
"MIT"
] | 2,869 | 2019-03-22T04:45:32.000Z | 2022-03-31T14:47:42.000Z | megatron/microbatches.py | adammoody/Megatron-DeepSpeed | 972211163608818fe9e5ba821246f18d0a5dc264 | [
"MIT"
] | 161 | 2019-04-23T21:00:16.000Z | 2022-03-27T15:33:17.000Z | megatron/microbatches.py | adammoody/Megatron-DeepSpeed | 972211163608818fe9e5ba821246f18d0a5dc264 | [
"MIT"
] | 567 | 2019-04-05T22:17:47.000Z | 2022-03-31T04:45:25.000Z | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron number of micro-batches calculators."""
from abc import ABC
from abc import abstractmethod
def build_num_microbatches_calculator(args):
    """Construct the micro-batch calculator selected by `args`.

    Returns a ConstantNumMicroBatches when no batch-size ramp-up was
    requested (args.rampup_batch_size is None); otherwise a
    RampupBatchsizeNumMicroBatches configured from the three values of
    --rampup-batch-size.  Progress messages are printed on rank 0 only.
    """
    if args.rampup_batch_size is None:
        # Constant num micro-batches.
        num_microbatches_calculator = ConstantNumMicroBatches(
            args.global_batch_size, args.micro_batch_size,
            args.data_parallel_size)
        if args.rank == 0:
            print('setting number of micro-batches to constant {}'.format(
                num_microbatches_calculator.get()), flush=True)
    else:
        # BUG FIX: corrected the 'incerement' typo in the usage message.
        assert len(args.rampup_batch_size) == 3, 'expected the following ' \
            'format: --rampup-batch-size <start batch size> ' \
            '<batch size increment> <ramp-up samples>'
        start_batch_size = int(args.rampup_batch_size[0])
        batch_size_increment = int(args.rampup_batch_size[1])
        rampup_samples = int(args.rampup_batch_size[2])
        if args.rank == 0:
            print('will use batch size rampup starting from global batch '
                  'size {} to global batch size {} with batch size increments '
                  '{} over {} samples.'.format(start_batch_size,
                                               args.global_batch_size,
                                               batch_size_increment,
                                               rampup_samples), flush=True)
        num_microbatches_calculator = RampupBatchsizeNumMicroBatches(
            start_batch_size, batch_size_increment, rampup_samples,
            args.global_batch_size, args.micro_batch_size,
            args.data_parallel_size)
    return num_microbatches_calculator
class NumMicroBatchesCalculator(ABC):
    """Common interface for micro-batch count calculators.

    Subclasses implement ``update`` to recompute ``num_micro_batches``
    (and possibly ``current_global_batch_size``) as training consumes
    samples.
    """

    def __init__(self):
        # Both values remain None until update() first computes them.
        self.num_micro_batches = None
        self.current_global_batch_size = None

    def get(self):
        """Return the current number of micro-batches."""
        return self.num_micro_batches

    def get_current_global_batch_size(self):
        """Return the current global batch size."""
        return self.current_global_batch_size

    @abstractmethod
    def update(self, consumed_samples, consistency_check):
        """Recompute state given the number of samples consumed so far."""
class ConstantNumMicroBatches(NumMicroBatchesCalculator):
    """Calculator whose micro-batch count never changes during training."""

    def __init__(self, global_batch_size, micro_batch_size, data_parallel_size):
        # Samples processed per optimizer step by one micro-batch across
        # all data-parallel replicas.
        per_step = micro_batch_size * data_parallel_size
        assert global_batch_size % per_step == 0, (
            'global batch size ({}) is not divisible by micro batch size ({})'
            ' times data parallel size ({})'.format(global_batch_size,
                                                    micro_batch_size,
                                                    data_parallel_size))
        self.num_micro_batches = global_batch_size // per_step
        assert self.num_micro_batches >= 1
        self.current_global_batch_size = global_batch_size

    def update(self, consumed_samples, consistency_check):
        """No-op: the configuration is constant."""
class RampupBatchsizeNumMicroBatches(NumMicroBatchesCalculator):
    """Calculator that linearly ramps the global batch size from
    `start_batch_size` up to `global_batch_size` as samples are consumed."""
    def __init__(self, start_batch_size, batch_size_increment, ramup_samples,
                 global_batch_size, micro_batch_size, data_parallel_size):
        """Batch size ramp up.
        Over
           steps = (global-batch-size - start-batch-size) / batch_size_increment
        increment batch size from start-batch-size to global-batch-size using
           rampup-samples / steps
        samples.
        Arguments:
            start_batch_size: global batch size to start with
            batch_size_increment: global batch size increments
            ramup_samples: number of samples to use ramp up global
               batch size from `start_batch_size` to `global_batch_size`
            global_batch_size: global batch size post rampup
            micro_batch_size: micro batch size
            data_parallel_size: data parallel size.
        """
        self.micro_batch_size = micro_batch_size
        self.data_parallel_size = data_parallel_size
        # Samples consumed per micro-batch step across all replicas.
        self.micro_batch_times_data_parallel_size = self.micro_batch_size * \
            self.data_parallel_size
        assert self.micro_batch_times_data_parallel_size > 0
        assert start_batch_size > 0
        self.start_batch_size = start_batch_size
        assert global_batch_size > 0
        self.global_batch_size = global_batch_size
        diff_batch_size = self.global_batch_size - self.start_batch_size
        assert diff_batch_size >= 0
        assert batch_size_increment > 0
        self.batch_size_increment = batch_size_increment
        assert diff_batch_size % batch_size_increment == 0, 'expected ' \
            'global batch size interval ({}) to be divisible by global batch ' \
            'size increment ({})'.format(diff_batch_size, batch_size_increment)
        num_increments = diff_batch_size // self.batch_size_increment
        self.ramup_samples = ramup_samples
        assert self.ramup_samples >= 0
        # NOTE(review): if start_batch_size == global_batch_size then
        # num_increments is 0 and this division raises ZeroDivisionError —
        # confirm callers never request a zero-interval ramp-up.
        self.rampup_samples_per_increment = self.ramup_samples / num_increments
        # Initialize number of microbatches.
        self.update(0, False)
    def update(self, consumed_samples, consistency_check):
        """Recompute current_global_batch_size and num_micro_batches for
        the given progress; optionally verify divisibility."""
        if consumed_samples > self.ramup_samples:
            # Ramp-up finished: hold at the final global batch size.
            self.current_global_batch_size = self.global_batch_size
        else:
            steps = int(consumed_samples / self.rampup_samples_per_increment)
            self.current_global_batch_size = self.start_batch_size + \
                steps * self.batch_size_increment
            assert self.current_global_batch_size <= self.global_batch_size
        if consistency_check:
            # NOTE(review): this message is missing a space between 'times'
            # and 'data parallel size'.
            assert self.current_global_batch_size % \
                self.micro_batch_times_data_parallel_size == 0, 'current global ' \
                'batch size ({}) is not divisible by micro-batch-size ({}) times' \
                'data parallel size ({})'.format(self.current_global_batch_size,
                                                 self.micro_batch_size,
                                                 self.data_parallel_size)
        self.num_micro_batches = self.current_global_batch_size // \
            self.micro_batch_times_data_parallel_size
| 43.613924 | 83 | 0.648817 |
b21f7028ea466cff0ececc8c7f7da99a89a04b6e | 1,476 | py | Python | SubmissionScoreTracker/submissionscoretracker.py | zatherz/reddit | bd4378ff62e893d28fa824df5678c6de4021b123 | [
"MIT"
] | 444 | 2015-01-04T02:31:53.000Z | 2022-03-22T05:57:08.000Z | SubmissionScoreTracker/submissionscoretracker.py | zatherz/reddit | bd4378ff62e893d28fa824df5678c6de4021b123 | [
"MIT"
] | 12 | 2015-05-21T07:56:59.000Z | 2020-02-18T06:26:39.000Z | SubmissionScoreTracker/submissionscoretracker.py | zatherz/reddit | bd4378ff62e893d28fa824df5678c6de4021b123 | [
"MIT"
] | 199 | 2015-01-02T14:14:07.000Z | 2022-02-12T14:00:09.000Z | import praw
import sys
import time
import traceback
# Reddit OAuth2 credentials; blank here and expected to be supplied by an
# optional, untracked local `bot` module below.
USERAGENT = ""
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
try:
    import bot
    USERAGENT = bot.aG
    APP_ID = bot.oG_id
    APP_SECRET = bot.oG_secret
    APP_URI = bot.oG_uri
    APP_REFRESH = bot.oG_scopes['all']
except ImportError:
    # No local credentials module: the blank defaults above are used
    # (the login below will then presumably fail — confirm intended).
    pass
# Log in at import time so the module-level `r` session is ready for
# submissionscoretracker().
print('logging in')
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def submissionscoretracker(submissionid):
    """Poll a reddit submission forever, appending '<unix time>, <score>'
    lines to '<fullname>.txt' until interrupted with Ctrl-C.

    `submissionid` may be passed with or without the 't3_' type prefix.
    Uses the module-level authenticated praw session `r`.
    """
    if '_' not in submissionid:
        submissionid = 't3_' + submissionid
    submission = r.get_info(thing_id=submissionid)
    outfile = open(submission.fullname + '.txt', 'a+')
    last_refresh = time.time()
    try:
        while True:
            try:
                # BUG FIX: this condition used to be
                # `if time.time() - last_refresh:`, which is truthy on
                # essentially every iteration, so the OAuth token was
                # refreshed on every poll.  Reddit OAuth2 access tokens are
                # valid for one hour; refresh shortly before expiry instead.
                if time.time() - last_refresh >= 3540:
                    r.refresh_access_information()
                    last_refresh = time.time()
                submission.refresh()
                print('%s, %d' % (time.strftime('%H:%M:%S'), submission.score))
                outfile.write('%d, %d\n' % (int(time.time()), submission.score))
                outfile.flush()
            except KeyboardInterrupt:
                return
            except Exception:
                # Log and keep polling: a transient API error should not
                # kill the tracker.  (Was a bare `except:`, which also
                # swallowed SystemExit.)
                traceback.print_exc()
    finally:
        # Close the log file on every exit path, not just KeyboardInterrupt.
        outfile.close()
# Entry point: take the submission id from argv, or prompt interactively
# when no argument was given.
# NOTE(review): the trailing "| 26.836364 | 76 | 0.626694 |" on the last
# line looks like dataset-extraction residue, not code — confirm against
# the upstream file.
if __name__ == '__main__':
    if len(sys.argv) == 1:
        submissionid = input('id: ')
    else:
        submissionid = sys.argv[1]
    submissionscoretracker(submissionid) | 26.836364 | 76 | 0.626694 |
8107263fb7a3028a2a02cd934870e8f6eb38b785 | 91 | py | Python | pytreelib/__init__.py | akashbw/pytreelib | 6d26f52825c8c455c1ae1df4ce1925f26dd0a0a0 | [
"MIT"
] | 1 | 2020-02-03T04:05:13.000Z | 2020-02-03T04:05:13.000Z | pytreelib/__init__.py | akashbw/pytreelib | 6d26f52825c8c455c1ae1df4ce1925f26dd0a0a0 | [
"MIT"
] | null | null | null | pytreelib/__init__.py | akashbw/pytreelib | 6d26f52825c8c455c1ae1df4ce1925f26dd0a0a0 | [
"MIT"
] | null | null | null | __version__ = "0.1.1"
from .node import BinaryTreeNode
from .tree import BinarySearchTree
| 18.2 | 34 | 0.791209 |
b7ebc2080facbf1e8f86d7e4fe20234d266938f0 | 3,550 | py | Python | src/years/2020/21/solution.py | cpallapolu/advent-of-code-2020 | aae18e28273f09694a3b9bceeee71edc3c88e048 | [
"MIT"
] | null | null | null | src/years/2020/21/solution.py | cpallapolu/advent-of-code-2020 | aae18e28273f09694a3b9bceeee71edc3c88e048 | [
"MIT"
] | null | null | null | src/years/2020/21/solution.py | cpallapolu/advent-of-code-2020 | aae18e28273f09694a3b9bceeee71edc3c88e048 | [
"MIT"
] | null | null | null |
from collections import defaultdict
from typing import Dict, List, Set
from aocpuzzle import AoCPuzzle
class Puzzle21(AoCPuzzle):
    """Advent of Code 2020, day 21: match allergens to ingredients.

    Each input line lists a food's ingredients followed by
    '(contains a, b, ...)' naming some of the allergens it carries.
    """

    def common(self, input_data: List[str]) -> None:
        """Parse the food list into ingredient/allergen sets and pairs."""
        self.ingredients = set()
        self.allergens: Set[str] = set()
        self.foods = []
        for line in input_data:
            ingredients, allergens = line.split(' (contains ')
            allergens = allergens[:-1]  # strip the trailing ')'
            self.ingredients.update(ingredients.split(' '))
            self.allergens.update(a.replace(',', '') for a in allergens.split(' '))
            self.foods.append(
                (
                    ingredients.split(' '),
                    [a.replace(',', '') for a in allergens.split(' ')],
                ),
            )
        # allergen -> candidate carrier ingredients (filled by check_foods).
        self.allergen_map: Dict[str, Set[str]] = defaultdict(set)
        # Starts as all ingredients; check_foods discards every candidate.
        self.no_allergen_ingredients = self.ingredients.copy()

    def check_foods(self) -> None:
        """For each allergen, intersect the ingredient lists of all foods
        declaring it; whatever survives is a candidate carrier."""
        for allergen in self.allergens:
            ingredient_options: Set[str] = set()
            for food_ingredients, food_allergens in self.foods:
                if allergen in food_allergens:
                    # The first matching food seeds the candidate set;
                    # later matches narrow it via intersection.
                    if len(ingredient_options) == 0:
                        ingredient_options = set(food_ingredients)
                    else:
                        ingredient_options = ingredient_options.intersection(food_ingredients)
            self.allergen_map[allergen] = ingredient_options
            for ingredient_option in ingredient_options:
                self.no_allergen_ingredients.discard(ingredient_option)

    def part1(self) -> int:
        """Count per-food appearances of ingredients that cannot contain
        any allergen."""
        self.check_foods()
        num_ingredients = 0
        for no_allergen_ingredient in self.no_allergen_ingredients:
            for food_ingredients, _ in self.foods:
                if no_allergen_ingredient in food_ingredients:
                    num_ingredients += 1
        return num_ingredients

    def part2(self) -> str:
        """Resolve each allergen to its unique carrier ingredient and join
        the ingredients with ',' in allergen-alphabetical order."""
        self.check_foods()
        ingredient_allergen: Dict[str, str] = {}
        allergens = list(self.allergen_map.keys())
        while len(allergens) > 0:
            # BUG FIX: iterate over a copy.  The original removed items from
            # `allergens` while iterating it, which skips elements (the
            # outer while loop masked the bug at the cost of extra passes).
            for allergen in list(allergens):
                if len(self.allergen_map[allergen]) == 1:
                    ingredient = self.allergen_map[allergen].pop()
                    ingredient_allergen[allergen] = ingredient
                    allergens.remove(allergen)
                    # This ingredient is now taken; drop it from the other
                    # allergens' candidate sets.
                    for a in allergens:
                        self.allergen_map[a].discard(ingredient)
        return ','.join(
            ingredient for _allergen, ingredient in sorted(ingredient_allergen.items())
        )

    def test_cases(self, input_data: List[str]) -> int:
        """Run sample- and real-input assertions; returns the number of
        puzzle parts validated."""
        tests = [
            'mxmxvkd kfcds sqjhc nhms (contains dairy, fish)',
            'trh fvjkl sbzzf mxmxvkd (contains dairy)',
            'sqjhc fvjkl (contains soy)',
            'sqjhc mxmxvkd sbzzf (contains fish)',
        ]
        self.common(tests)
        assert self.part1() == 5
        self.common(tests)
        assert self.part2() == 'mxmxvkd,sqjhc,fvjkl'
        self.common(input_data)
        assert self.part1() == 2317
        self.common(input_data)
        assert self.part2() == 'kbdgs,sqvv,slkfgq,vgnj,brdd,tpd,csfmb,lrnz'
        return 2
| 35.5 | 94 | 0.569014 |
2102a3d88246716d27a23359afa33d28a5a16867 | 1,596 | py | Python | sklearn_genetic/tests/test_plots.py | kari-d/Sklearn-genetic-opt | e4a76a5a4c73d456c3a0b426f5703e77c3175e66 | [
"MIT"
] | 1 | 2021-06-24T08:36:28.000Z | 2021-06-24T08:36:28.000Z | sklearn_genetic/tests/test_plots.py | kari-d/Sklearn-genetic-opt | e4a76a5a4c73d456c3a0b426f5703e77c3175e66 | [
"MIT"
] | null | null | null | sklearn_genetic/tests/test_plots.py | kari-d/Sklearn-genetic-opt | e4a76a5a4c73d456c3a0b426f5703e77c3175e66 | [
"MIT"
] | null | null | null | import pytest
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from .. import GASearchCV
from ..plots import plot_fitness_evolution, plot_search_space
from ..space import Integer, Categorical, Continuous
# Module-level fixture: fit one GASearchCV once and reuse it in the tests
# below (the evolution is slow, so it is shared rather than recomputed).
data = load_boston()
y = data["target"]
X = data["data"]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.33, random_state=42
)
clf = DecisionTreeRegressor()
# Small population/generations keep the fixture fast; the tests only need
# a fitted estimator with history, not a good model.
evolved_estimator = GASearchCV(
    clf,
    cv=2,
    scoring="r2",
    population_size=8,
    generations=5,
    tournament_size=3,
    elitism=True,
    crossover_probability=0.9,
    mutation_probability=0.05,
    param_grid={
        "ccp_alpha": Continuous(0, 1),
        "criterion": Categorical(["mse", "mae"]),
        "max_depth": Integer(2, 20),
        "min_samples_split": Integer(2, 30),
    },
    criteria="max",
    n_jobs=-1,
)
evolved_estimator.fit(X_train, y_train)
def test_plot_evolution():
    """Plotting the default metric succeeds; an unknown metric raises
    with the documented error message."""
    plot_fitness_evolution(evolved_estimator)
    with pytest.raises(Exception) as excinfo:
        plot_fitness_evolution(evolved_estimator, metric="accuracy")
    expected = (
        "metric must be one of ['fitness', 'fitness_std', "
        "'fitness_max', 'fitness_min'], but got accuracy instead"
    )
    assert str(excinfo.value) == expected
def test_plot_space():
    """Search-space plotting works with defaults, on repeated calls, and
    with an explicit feature subset."""
    plot_search_space(evolved_estimator)
    # Second call mirrors the original test: re-plotting must not fail.
    plot_search_space(evolved_estimator)
    plot_search_space(
        evolved_estimator,
        features=["ccp_alpha", "max_depth", "min_samples_split"],
    )
| 24.9375 | 93 | 0.696742 |
3e723e4202b1440edec51086d0b1d8bb27f9f053 | 20,156 | py | Python | gpMgmt/bin/gppylib/system/configurationImplGpdb.py | haolinw/gpdb | 16a9465747a54f0c61bac8b676fe7611b4f030d8 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/gppylib/system/configurationImplGpdb.py | haolinw/gpdb | 16a9465747a54f0c61bac8b676fe7611b4f030d8 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/gppylib/system/configurationImplGpdb.py | haolinw/gpdb | 16a9465747a54f0c61bac8b676fe7611b4f030d8 | [
"PostgreSQL",
"Apache-2.0"
] | 1 | 2022-03-18T03:08:11.000Z | 2022-03-18T03:08:11.000Z | #!/usr/bin/env python3
#
# Copyright (c) Greenplum Inc 2010. All Rights Reserved.
# Copyright (c) EMC/Greenplum Inc 2011. All Rights Reserved.
#
"""
This file defines the interface that can be used to fetch and update system
configuration information.
"""
import os, copy
from collections import defaultdict
from gppylib.gplog import *
from gppylib.utils import checkNotNone
from gppylib.system.configurationInterface import *
from gppylib.system.ComputeCatalogUpdate import ComputeCatalogUpdate
from gppylib.gparray import GpArray, Segment, InvalidSegmentConfiguration
from gppylib import gparray
from gppylib.db import dbconn
from gppylib.commands.gp import get_local_db_mode
logger = get_default_logger()
class GpConfigurationProviderUsingGpdbCatalog(GpConfigurationProvider) :
"""
An implementation of GpConfigurationProvider will provide functionality to
fetch and update gpdb system configuration information (as stored in the
database)
Note that the client of this is assuming that the database data is not
changed by another party between the time segment data is loaded and when it
is updated
"""
    def __init__(self):
        # Coordinator connection URL; set by initializeProvider() and
        # required by every load/update operation.
        self.__coordinatorDbUrl = None
def initializeProvider( self, coordinatorPort ) :
"""
Initialize the provider to get information from the given coordinator db, if
it chooses to get its data from the database
returns self
"""
checkNotNone("coordinatorPort", coordinatorPort)
dbUrl = dbconn.DbURL(port=coordinatorPort, dbname='template1')
self.__coordinatorDbUrl = dbUrl
return self
    def loadSystemConfig( self, useUtilityMode, verbose=True ) :
        """
        Load all segment information from the configuration source.
        Returns a new GpArray object

        @param useUtilityMode True if the coordinator connection should be
               made in utility mode
        @param verbose if True, log an info message before querying
        """
        # ensure initializeProvider() was called
        checkNotNone("coordinatorDbUrl", self.__coordinatorDbUrl)
        if verbose :
            logger.info("Obtaining Segment details from coordinator...")
        array = GpArray.initFromCatalog(self.__coordinatorDbUrl, useUtilityMode)
        # Validation is skipped when the local coordinator runs in UTILITY
        # mode — presumably because the configuration may legitimately be
        # incomplete then; confirm against get_local_db_mode() semantics.
        if get_local_db_mode(array.coordinator.getSegmentDataDirectory()) != 'UTILITY':
            logger.debug("Validating configuration...")
            if not array.is_array_valid():
                raise InvalidSegmentConfiguration(array)
        return array
    def updateSystemConfig( self, gpArray, textForConfigTable, dbIdToForceMirrorRemoveAdd, useUtilityMode, allowPrimary) :
        """
        Update the configuration for the given segments in the underlying
        configuration store to match the current values

        Also resets any dirty bits on saved/updated objects

        @param textForConfigTable label to be used when adding to segment configuration history
        @param dbIdToForceMirrorRemoveAdd a map of dbid -> True for mirrors for which we should force updating the mirror
        @param useUtilityMode True if the operations we're doing are expected to run via utility moed
        @param allowPrimary True if caller authorizes add/remove primary operations (e.g. gpexpand)
        @return a map of dbid -> list of SQL statements that would undo the
                changes made for that segment (used to build backout scripts)
        """
        # ensure initializeProvider() was called
        checkNotNone("coordinatorDbUrl", self.__coordinatorDbUrl)
        logger.debug("Validating configuration changes...")
        if not gpArray.is_array_valid():
            logger.critical("Configuration is invalid")
            raise InvalidSegmentConfiguration(gpArray)
        # All catalog changes below happen in one transaction.
        conn = dbconn.connect(self.__coordinatorDbUrl, useUtilityMode, allowSystemTableMods=True)
        dbconn.execSQL(conn, "BEGIN")
        # compute what needs to be updated
        update = ComputeCatalogUpdate(gpArray, dbIdToForceMirrorRemoveAdd, useUtilityMode, allowPrimary)
        update.validate()
        # put the mirrors in a map by content id so we can update them later
        mirror_map = {}
        for seg in update.mirror_to_add:
            mirror_map[ seg.getSegmentContentId() ] = seg
        # create a map by dbid in which to put backout SQL statements
        backout_map = defaultdict(list)
        # remove mirror segments (e.g. for gpexpand rollback)
        for seg in update.mirror_to_remove:
            addSQL = self.updateSystemConfigRemoveMirror(conn, gpArray, seg, textForConfigTable)
            backout_map[seg.getSegmentDbId()].append(addSQL)
            backout_map[seg.getSegmentDbId()].append(self.getPeerNotInSyncSQL(gpArray, seg))
        # remove primary segments (e.g for gpexpand rollback)
        for seg in update.primary_to_remove:
            addSQL = self.updateSystemConfigRemovePrimary(conn, gpArray, seg, textForConfigTable)
            backout_map[seg.getSegmentDbId()].append(addSQL)
            backout_map[seg.getSegmentDbId()].append(self.getPeerNotInSyncSQL(gpArray, seg))
        # add new primary segments
        for seg in update.primary_to_add:
            removeSQL = self.updateSystemConfigAddPrimary(conn, gpArray, seg, textForConfigTable, mirror_map)
            backout_map[seg.getSegmentDbId()].append(removeSQL)
            backout_map[seg.getSegmentDbId()].append(self.getPeerNotInSyncSQL(gpArray, seg))
        # add new mirror segments
        for seg in update.mirror_to_add:
            removeSQL = self.updateSystemConfigAddMirror(conn, gpArray, seg, textForConfigTable)
            backout_map[seg.getSegmentDbId()].append(removeSQL)
            backout_map[seg.getSegmentDbId()].append(self.getPeerNotInSyncSQL(gpArray, seg))
        # remove and add mirror segments necessitated by catalog attribute update
        for seg in update.mirror_to_remove_and_add:
            addSQL, removeSQL = self.updateSystemConfigRemoveAddMirror(conn, gpArray, seg, textForConfigTable)
            backout_map[seg.getSegmentDbId()].append(removeSQL)
            backout_map[seg.getSegmentDbId()].append(addSQL)
            backout_map[seg.getSegmentDbId()].append(self.getPeerNotInSyncSQL(gpArray, seg))
        # apply updates to existing segments
        for seg in update.segment_to_update:
            originalSeg = update.dbsegmap.get(seg.getSegmentDbId())
            self.__updateSystemConfigUpdateSegment(conn, gpArray, seg, originalSeg, textForConfigTable)
        # commit changes
        logger.debug("Committing configuration table changes")
        dbconn.execSQL(conn, "COMMIT")
        conn.close()
        # Remember the freshly persisted state so later backout scripts are
        # built from what was actually written to the catalog.
        gpArray.setSegmentsAsLoadedFromDb([seg.copy() for seg in gpArray.getDbList()])
        return backout_map
def updateSystemConfigRemoveMirror(self, conn, gpArray, seg, textForConfigTable):
"""
Remove a mirror segment currently in gp_segment_configuration
but not present in the goal configuration and record our action
in gp_configuration_history.
"""
dbId = seg.getSegmentDbId()
addSQL = self.__callSegmentRemoveMirror(conn, gpArray, seg)
self.__insertConfigHistory(conn, dbId, "%s: removed mirror segment configuration" % textForConfigTable)
return addSQL
def updateSystemConfigRemovePrimary(self, conn, gpArray, seg, textForConfigTable):
"""
Remove a primary segment currently in gp_segment_configuration
but not present in the goal configuration and record our action
in gp_configuration_history.
"""
dbId = seg.getSegmentDbId()
addSQL = self.__callSegmentRemove(conn, gpArray, seg)
self.__insertConfigHistory(conn, dbId, "%s: removed primary segment configuration" % textForConfigTable)
return addSQL
    def updateSystemConfigAddPrimary(self, conn, gpArray, seg, textForConfigTable, mirror_map):
        """
        Add a primary segment specified in our goal configuration but
        which is missing from the current gp_segment_configuration table
        and record our action in gp_configuration_history.

        Returns the SQL that would remove the segment again (for backout
        scripts).  Side effect: updates the content id of `seg` (and its
        mirror in mirror_map, if any) to the catalog-assigned value.
        """
        # lookup the mirror (if any) so that we may correct its content id
        mirrorseg = mirror_map.get( seg.getSegmentContentId() )
        # add the new segment
        dbId, removeSQL = self.__callSegmentAdd(conn, gpArray, seg)
        # gp_add_segment_primary() will update the mode and status.
        # get the newly added segment's content id
        sql = "SELECT content FROM pg_catalog.gp_segment_configuration WHERE dbId = %s" % self.__toSqlIntValue(seg.getSegmentDbId())
        logger.debug(sql)
        sqlResult = self.fetchSingleOutputRow(conn, sql)
        contentId = int(sqlResult[0])
        # Set the new content id for the primary as well the mirror if present.
        seg.setSegmentContentId(contentId)
        if mirrorseg is not None:
            mirrorseg.setSegmentContentId(contentId)
        self.__insertConfigHistory(conn, dbId, "%s: inserted primary segment configuration with contentid %s" % (textForConfigTable, contentId))
        return removeSQL
def updateSystemConfigAddMirror(self, conn, gpArray, seg, textForConfigTable):
"""
Add a mirror segment specified in our goal configuration but
which is missing from the current gp_segment_configuration table
and record our action in gp_configuration_history.
"""
dbId, removeSQL = self.__callSegmentAddMirror(conn, gpArray, seg)
self.__insertConfigHistory(conn, dbId, "%s: inserted mirror segment configuration" % textForConfigTable)
return removeSQL
    def updateSystemConfigRemoveAddMirror(self, conn, gpArray, seg, textForConfigTable):
        """
        We've been asked to update the mirror in a manner that require
        it to be removed and then re-added. Perform the tasks
        and record our action in gp_configuration_history.

        Returns an (addSQL, removeSQL) pair of backout statements: SQL to
        re-add the original mirror, and SQL to remove the re-added one.
        """
        origDbId = seg.getSegmentDbId()
        addSQL = self.__callSegmentRemoveMirror(conn, gpArray, seg)
        # removeAndAdd=True suppresses the duplicate configuration-history
        # record the remove-side backout SQL would otherwise generate.
        dbId, removeSQL = self.__callSegmentAddMirror(conn, gpArray, seg, removeAndAdd=True)
        self.__insertConfigHistory(conn, seg.getSegmentDbId(),
                                   "%s: inserted segment configuration for full recovery or original dbid %s" \
                                   % (textForConfigTable, origDbId))
        return addSQL, removeSQL
    def __updateSystemConfigUpdateSegment(self, conn, gpArray, seg, originalSeg, textForConfigTable):
        """Persist ``seg``'s mode/status to gp_segment_configuration and
        record the change in gp_configuration_history.

        NOTE(review): ``gpArray`` and ``originalSeg`` are accepted but not
        used in this method -- confirm whether they are kept for interface
        symmetry with the sibling update methods.
        """
        # update mode and status
        #
        what = "%s: segment mode and status"
        self.__updateSegmentModeStatus(conn, seg)
        self.__insertConfigHistory(conn, seg.getSegmentDbId(), what % textForConfigTable)
# This is a helper function for creating backout scripts, since we need to use the original segment information,
# not the segment information after it has been updated to facilitate recovery. Not all code paths result in the
# segments-as-loaded array being populated, hence the None checks.
def __getSegmentAsLoaded(self, gpArray, seg):
segments = gpArray.getSegmentsAsLoadedFromDb()
if segments is not None:
matching_segment = [s for s in segments if s.getSegmentDbId() == seg.getSegmentDbId()]
if matching_segment:
return matching_segment[0]
return seg
def __getConfigurationHistorySQL(self, dbid):
sql = ";\nINSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\n\tnow(),\n\t%s,\n\t%s\n)" \
% (
self.__toSqlIntValue(dbid),
"'gprecoverseg: segment config for backout: inserted segment configuration for full recovery or original dbid %d'" % dbid,
)
return sql
    #
    # The below __callSegment[Action][Target] functions return the SQL statements to reverse the changes they make
    # (not the SQL statements they actually call), to be used to generate backout scripts to reverse the changes later.
    #
    def __callSegmentRemoveMirror(self, conn, gpArray, seg):
        """
        Call gp_remove_segment_mirror() to remove the mirror.

        Returns SQL that re-adds the mirror (built from the segment as
        originally loaded from the database) for use in a backout script.
        """
        sql = self.__getSegmentRemoveMirrorSQL(seg)
        logger.debug(sql)
        result = self.fetchSingleOutputRow(conn, sql)
        assert result[0] # must return True
        return self.__getSegmentAddSQL(self.__getSegmentAsLoaded(gpArray, seg), backout=True)
def __getSegmentRemoveMirrorSQL(self, seg, backout=False):
sql = "SELECT gp_remove_segment_mirror(%s::int2)" % (self.__toSqlIntValue(seg.getSegmentContentId()))
if backout:
sql += self.__getConfigurationHistorySQL(seg.getSegmentDbId())
return sql
    def __callSegmentRemove(self, conn, gpArray, seg):
        """
        Call gp_remove_segment() to remove the primary.

        Returns SQL that re-adds the segment (built from the segment as
        originally loaded from the database) for use in a backout script.
        """
        sql = self.__getSegmentRemoveSQL(seg)
        logger.debug(sql)
        result = self.fetchSingleOutputRow(conn, sql)
        assert result[0]
        return self.__getSegmentAddMirrorSQL(self.__getSegmentAsLoaded(gpArray, seg), backout=True)
def __getSegmentRemoveSQL(self, seg, backout=False, removeAndAdd=False):
sql = "SELECT gp_remove_segment(%s::int2)" % (self.__toSqlIntValue(seg.getSegmentDbId()))
# Don't generate a configuration history line in the updateSystemConfigRemoveAddMirror case,
# to avoid duplication; the later call to __getSegmentAddSQL will take care of that.
if backout and not removeAndAdd:
sql += self.__getConfigurationHistorySQL(seg.getSegmentDbId())
return sql
    def __callSegmentAdd(self, conn, gpArray, seg):
        """
        Ideally, should call gp_add_segment_primary() to add the
        primary. But due to chicken-egg problem, need dbid for
        creating the segment but can't add to catalog before creating
        segment. Hence, instead using gp_add_segment() which takes
        dbid and registers in catalog using the same. Return the new
        segment's dbid.

        Returns (dbId, removeSQL) where removeSQL reverses this add for a
        backout script.
        """
        logger.debug('callSegmentAdd %s' % repr(seg))
        sql = self.__getSegmentAddSQL(seg)
        logger.debug(sql)
        sqlResult = self.fetchSingleOutputRow(conn, sql)
        dbId = int(sqlResult[0])
        # Build the backout SQL from the as-loaded segment *before*
        # overwriting seg's dbid below.
        removeSQL = self.__getSegmentRemoveMirrorSQL(self.__getSegmentAsLoaded(gpArray, seg), backout=True)
        seg.setSegmentDbId(dbId)
        return dbId, removeSQL
    def __getSegmentAddSQL(self, seg, backout=False):
        """Build the gp_add_segment() call for ``seg``.

        In the normal case the segment is registered as an up primary
        ('p'/'u'); with ``backout`` set it is registered as a down mirror
        ('m'/'d') and a configuration-history record is appended.
        """
        sql = "SELECT gp_add_segment(%s::int2, %s::int2, '%s', '%s', 'n', '%s', %s, %s, %s, %s)" \
            % (
                self.__toSqlIntValue(seg.getSegmentDbId()),
                self.__toSqlIntValue(seg.getSegmentContentId()),
                'm' if backout else 'p',
                seg.getSegmentPreferredRole(),
                'd' if backout else 'u',
                self.__toSqlIntValue(seg.getSegmentPort()),
                self.__toSqlTextValue(seg.getSegmentHostName()),
                self.__toSqlTextValue(seg.getSegmentAddress()),
                self.__toSqlTextValue(seg.getSegmentDataDirectory()),
            )
        if backout:
            sql += self.__getConfigurationHistorySQL(seg.getSegmentDbId())
        return sql
    def __callSegmentAddMirror(self, conn, gpArray, seg, removeAndAdd=False):
        """
        Similar to __callSegmentAdd, ideally we should call gp_add_segment_mirror() to add the mirror.
        But chicken-egg problem also exists in mirror case. If we use gp_add_segment_mirror(),
        new dbid will be chosen by `get_availableDbId()`, which cannot ensure to be same as dbid
        in internal.auto.conf(see issue-9837). Refer to __callSegmentAdd for details.

        Returns (dbId, removeSQL) where removeSQL reverses this add for a
        backout script; ``removeAndAdd`` is forwarded so that the backout
        SQL skips its duplicate history record in the remove-and-add case.
        """
        logger.debug('callSegmentAddMirror %s' % repr(seg))
        sql = self.__getSegmentAddMirrorSQL(seg)
        logger.debug(sql)
        sqlResult = self.fetchSingleOutputRow(conn, sql)
        dbId = int(sqlResult[0])
        # Build the backout SQL from the as-loaded segment *before*
        # overwriting seg's dbid below.
        removeSQL = self.__getSegmentRemoveSQL(self.__getSegmentAsLoaded(gpArray, seg), backout=True, removeAndAdd=removeAndAdd)
        seg.setSegmentDbId(dbId)
        return dbId, removeSQL
    def __getSegmentAddMirrorSQL(self, seg, backout=False):
        """Build the gp_add_segment() call registering ``seg`` as a down
        mirror ('m'/'m'/'d'); with ``backout`` set, append a
        configuration-history record."""
        #TODO should we use seg.getSegmentPreferredRole()
        sql = "SELECT gp_add_segment(%s::int2, %s::int2, 'm', 'm', 'n', 'd', %s, %s, %s, %s)" \
            % (
                self.__toSqlIntValue(seg.getSegmentDbId()),
                self.__toSqlIntValue(seg.getSegmentContentId()),
                self.__toSqlIntValue(seg.getSegmentPort()),
                self.__toSqlTextValue(seg.getSegmentHostName()),
                self.__toSqlTextValue(seg.getSegmentAddress()),
                self.__toSqlTextValue(seg.getSegmentDataDirectory()),
            )
        if backout:
            sql += self.__getConfigurationHistorySQL(seg.getSegmentDbId())
        return sql
# This function generates a statement to update the mode of a given segment's peer to
# "not in sync", something that is necessary to do for every segment that is added in a
# backout script. The gp_add_segment function sets the added segment's mode to 'n', and
# if its peer's mode doesn't match (is still set to 's') when the next query is executed,
# this will almost certainly crash the cluster.
def getPeerNotInSyncSQL(self, gpArray, seg):
peerMap = gpArray.getDbIdToPeerMap()
dbid = seg.getSegmentDbId()
if dbid in peerMap: # The dbid may not be in the peer map, if e.g. we're getting here from gpexpand, in which case no action is necessary
peerSegment = peerMap[dbid]
updateStmt = "SET allow_system_table_mods=true;\nUPDATE gp_segment_configuration SET mode = 'n' WHERE dbid = %d;"
return updateStmt % peerSegment.getSegmentDbId()
return ""
    def __updateSegmentModeStatus(self, conn, seg):
        """UPDATE the mode and status columns of ``seg``'s row in
        gp_segment_configuration; exactly one row must be affected."""
        # run an update
        sql = "UPDATE pg_catalog.gp_segment_configuration\n" + \
              " SET\n" + \
              " mode = " + self.__toSqlCharValue(seg.getSegmentMode()) + ",\n" \
              " status = " + self.__toSqlCharValue(seg.getSegmentStatus()) + "\n" \
              "WHERE dbid = " + self.__toSqlIntValue(seg.getSegmentDbId()) + ";"
        logger.debug(sql)
        dbconn.executeUpdateOrInsert(conn, sql, 1)
    def fetchSingleOutputRow(self, conn, sql, retry=False):
        """
        Execute specified SQL command and return what we expect to be a single row.
        Raise an exception when more or fewer than one row is seen and when more
        than one row is seen display up to 10 rows as logger warnings.

        NOTE(review): the ``retry`` parameter is accepted but unused here --
        confirm whether retries are handled by the caller.
        """
        cursor = dbconn.query(conn, sql)
        numrows = cursor.rowcount
        numshown = 0
        res = None
        for row in cursor:
            if numrows != 1:
                #
                # if we got back more than one row
                # we print a few of the rows first
                # instead of immediately raising an exception
                #
                numshown += 1
                if numshown > 10:
                    break
                logger.warning('>>> %s' % row)
            else:
                # Exactly one row expected: remember it, and assert we have
                # not somehow seen one already.
                assert res is None
                res = row
        assert res is not None
        cursor.close()
        if numrows != 1:
            raise Exception("SQL returned %d rows, not 1 as expected:\n%s" % (numrows, sql))
        return res
    def __insertConfigHistory(self, conn, dbId, msg ):
        """Record ``msg`` for ``dbId`` in gp_configuration_history."""
        # now update change history
        sql = "INSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\n" \
            "now(),\n " + \
            self.__toSqlIntValue(dbId) + ",\n " + \
            self.__toSqlCharValue(msg) + "\n)"
        logger.debug(sql)
        dbconn.executeUpdateOrInsert(conn, sql, 1)
def __toSqlIntValue(self, val):
if val is None:
return "null"
return str(val)
def __toSqlArrayStringValue(self, val):
if val is None:
return "null"
return '"' + val.replace('"','\\"').replace('\\','\\\\') + '"'
    def __toSqlCharValue(self, val):
        # Char values are quoted and escaped exactly like text values.
        return self.__toSqlTextValue(val)
    def __toSqlTextValue(self, val):
        """Render ``val`` as a single-quoted SQL text literal ("null" for
        None), doubling embedded quotes and backslashes."""
        if val is None:
            return "null"
        # Order is safe here: the doubled quotes ('') introduce no
        # backslashes for the second replace to touch.
        return "'" + val.replace("'","''").replace('\\','\\\\') + "'"
| 43.817391 | 145 | 0.661887 |
5a3fb61c644e4394762a92c896075c1ef22bafc4 | 1,376 | py | Python | setup.py | QuiNovas/athena-type-convertor | 775e6deeccd1be4a486363bcdbff57ce594849f0 | [
"Apache-2.0"
] | 2 | 2020-07-18T00:23:27.000Z | 2021-05-15T14:23:55.000Z | setup.py | QuiNovas/athena-type-converter | 775e6deeccd1be4a486363bcdbff57ce594849f0 | [
"Apache-2.0"
] | null | null | null | setup.py | QuiNovas/athena-type-converter | 775e6deeccd1be4a486363bcdbff57ce594849f0 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# What does your project relate to?
app_keywords = 'quinovas'
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
app_install_requires = []
setup(
name='athena-type-converter',
version='0.0.3',
description='Helper functions to convert types returned from Athena into Python types',
long_description='Helper functions to convert types returned from Athena into Python types',
url='https://github.com/QuiNovas/athena-type-converter',
author='Joseph Wortmann',
author_email='joseph.wortmann@gmail.com',
license='APL 2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Topic :: System :: Software Distribution',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.7',
],
keywords='quinovas',
install_requires=[],
package_dir={'': 'src'},
packages=find_packages('src'),
)
| 26.461538 | 96 | 0.695494 |
1647aec4df37f689fd764b271e93285d8d6b5568 | 1,357 | py | Python | docs/conf.py | eniovianna/py_dss_tools | 3057fb0b74facd05a362e4e4a588f79f70aa9dd7 | [
"MIT"
] | null | null | null | docs/conf.py | eniovianna/py_dss_tools | 3057fb0b74facd05a362e4e4a588f79f70aa9dd7 | [
"MIT"
] | null | null | null | docs/conf.py | eniovianna/py_dss_tools | 3057fb0b74facd05a362e4e4a588f79f70aa9dd7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sphinx_py3doc_enhanced_theme
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
source_suffix = '.rst'
master_doc = 'index'
project = 'py-dss-tools'
year = '2021'
author = 'Paulo Radatz'
copyright = '{0}, {1}'.format(year, author)
version = release = '0.0.0'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://https://github.com/PauloRadatz/py_dss_tools/py_dss_tools/py-dss-tools/issues/%s', '#'),
'pr': ('https://https://github.com/PauloRadatz/py_dss_tools/py_dss_tools/py-dss-tools/pull/%s', 'PR #'),
}
html_theme = "sphinx_py3doc_enhanced_theme"
html_theme_path = [sphinx_py3doc_enhanced_theme.get_html_theme_path()]
html_theme_options = {
'githuburl': 'https://https://github.com/PauloRadatz/py_dss_tools/py_dss_tools/py-dss-tools/'
}
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = f'{project}-{version}'
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
| 28.270833 | 110 | 0.705232 |
92e00a8fe877687f749ff92e0a4fc9ad376fddb5 | 1,201 | py | Python | Interview Preparation Kit/Warm-up Challenges/Repeated Strings.py | karangehlod/Hackerrank | 8a934642eccfb6f179f0833e8be68379fdc8206e | [
"MIT"
] | null | null | null | Interview Preparation Kit/Warm-up Challenges/Repeated Strings.py | karangehlod/Hackerrank | 8a934642eccfb6f179f0833e8be68379fdc8206e | [
"MIT"
] | null | null | null | Interview Preparation Kit/Warm-up Challenges/Repeated Strings.py | karangehlod/Hackerrank | 8a934642eccfb6f179f0833e8be68379fdc8206e | [
"MIT"
] | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the repeatedString function below.
def repeatedString(s, n):
    """Count the letter 'a' in the first ``n`` characters of ``s`` repeated
    infinitely.

    Improvements over the original: removes the debug ``print`` calls that
    polluted stdout, and replaces the manual counting loops with
    ``divmod`` + ``str.count`` (O(len(s)) time, O(1) extra space).
    Both branches of the old code (n larger or smaller than len(s)) reduce
    to the same formula: count 'a' in one full copy, multiply by the number
    of whole copies that fit in n, and add the 'a's in the leftover prefix.

    :param s: non-empty base string
    :param n: number of characters of the infinite repetition to consider
    :return: number of 'a' characters among those n characters
    """
    full_copies, leftover = divmod(n, len(s))
    return full_copies * s.count('a') + s.count('a', 0, leftover)
if __name__ == '__main__':
    # HackerRank harness: read s and n from stdin and write the answer to
    # the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    s = input()
    n = int(input())
    result = repeatedString(s, n)
    fptr.write(str(result) + '\n')
    fptr.close()
| 21.446429 | 59 | 0.567027 |
9c9abf4f95879a0e16379404daa1bd93fc474785 | 24,748 | py | Python | site/flask/lib/python2.7/site-packages/whoosh/codec/base.py | theholyhades1/tartanHacks2015 | a801b473f21cfbd136e2a5a74423e8c72d14f900 | [
"MIT"
] | 319 | 2016-09-22T15:54:48.000Z | 2022-03-18T02:36:58.000Z | site/flask/lib/python2.7/site-packages/whoosh/codec/base.py | theholyhades1/tartanHacks2015 | a801b473f21cfbd136e2a5a74423e8c72d14f900 | [
"MIT"
] | 9 | 2016-11-03T21:56:41.000Z | 2020-08-09T19:27:37.000Z | site/flask/lib/python2.7/site-packages/whoosh/codec/base.py | theholyhades1/tartanHacks2015 | a801b473f21cfbd136e2a5a74423e8c72d14f900 | [
"MIT"
] | 27 | 2016-10-06T16:05:32.000Z | 2022-03-18T02:37:00.000Z | # Copyright 2011 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""
This module contains base classes/interfaces for "codec" objects.
"""
from bisect import bisect_right
from whoosh import columns
from whoosh.compat import text_type
from whoosh.compat import abstractmethod, izip, xrange
from whoosh.filedb.compound import CompoundStorage
from whoosh.system import emptybytes
from whoosh.util import random_name
# Exceptions
class OutOfOrderError(Exception):
    """Raised by FieldWriter.add_postings when postings arrive out of
    (fieldname, termbytes) sorted order."""
    pass
# Base classes
class Codec(object):
    """Abstract base class for codec objects.

    A codec bundles the writer and reader implementations that define a
    segment's on-disk format: per-document values, the inverted index
    (terms), postings, and the Segment objects themselves.
    """

    # Whether this codec records per-document field length statistics.
    length_stats = True

    # Per document value writer
    @abstractmethod
    def per_document_writer(self, storage, segment):
        raise NotImplementedError

    # Inverted index writer
    @abstractmethod
    def field_writer(self, storage, segment):
        raise NotImplementedError

    # Postings
    @abstractmethod
    def postings_writer(self, dbfile, byteids=False):
        raise NotImplementedError

    @abstractmethod
    def postings_reader(self, dbfile, terminfo, format_, term=None, scorer=None):
        raise NotImplementedError

    # Index readers
    @abstractmethod
    def terms_reader(self, storage, segment):
        raise NotImplementedError

    @abstractmethod
    def per_document_reader(self, storage, segment):
        raise NotImplementedError

    def supports_graph(self):
        # By default a codec has no spelling graph; see CodecWithGraph.
        return False

    # Don't need to override this if supports_graph() return False
    def graph_reader(self, storage, segment):
        raise NotImplementedError

    # Segments and generations
    @abstractmethod
    def new_segment(self, storage, indexname):
        raise NotImplementedError
class WrappingCodec(Codec):
    """A codec that forwards every operation to a wrapped child codec.

    Subclass and override selected methods to alter part of another
    codec's behavior.
    """

    def __init__(self, child):
        # The codec all calls are delegated to.
        self._child = child

    def per_document_writer(self, storage, segment):
        return self._child.per_document_writer(storage, segment)

    def field_writer(self, storage, segment):
        return self._child.field_writer(storage, segment)

    def postings_writer(self, dbfile, byteids=False):
        return self._child.postings_writer(dbfile, byteids=byteids)

    def postings_reader(self, dbfile, terminfo, format_, term=None, scorer=None):
        return self._child.postings_reader(dbfile, terminfo, format_, term=term,
                                           scorer=scorer)

    def terms_reader(self, storage, segment):
        return self._child.terms_reader(storage, segment)

    def per_document_reader(self, storage, segment):
        return self._child.per_document_reader(storage, segment)

    def supports_graph(self):
        return self._child.supports_graph()

    def graph_reader(self, storage, segment):
        return self._child.graph_reader(storage, segment)

    def new_segment(self, storage, indexname):
        return self._child.new_segment(storage, indexname)
# Writer classes
class PerDocumentWriter(object):
    """Abstract writer for per-document data: stored field values, column
    values, and term vectors."""

    @abstractmethod
    def start_doc(self, docnum):
        raise NotImplementedError

    @abstractmethod
    def add_field(self, fieldname, fieldobj, value, length):
        raise NotImplementedError

    @abstractmethod
    def add_column_value(self, fieldname, columnobj, value):
        raise NotImplementedError("Codec does not implement writing columns")

    @abstractmethod
    def add_vector_items(self, fieldname, fieldobj, items):
        raise NotImplementedError

    def add_vector_matcher(self, fieldname, fieldobj, vmatcher):
        # Drain the matcher into (text, weight, value) triples and delegate
        # to add_vector_items().
        def readitems():
            while vmatcher.is_active():
                text = vmatcher.id()
                weight = vmatcher.weight()
                valuestring = vmatcher.value()
                yield (text, weight, valuestring)
                vmatcher.next()
        self.add_vector_items(fieldname, fieldobj, readitems())

    def finish_doc(self):
        pass

    def close(self):
        pass
class FieldWriter(object):
    """Abstract writer for the inverted index.

    add_postings() drives the abstract start_field/start_term/add/
    finish_term/finish_field protocol from a sorted stream of
    (fieldname, btext, docnum, weight, value) tuples, raising
    OutOfOrderError when the stream is not sorted.
    """

    def add_postings(self, schema, lengths, items):
        # This method translates a generator of (fieldname, btext, docnum, w, v)
        # postings into calls to start_field(), start_term(), add(),
        # finish_term(), finish_field(), etc.

        # Bind bound methods to locals -- they are called once per posting.
        start_field = self.start_field
        start_term = self.start_term
        add = self.add
        finish_term = self.finish_term
        finish_field = self.finish_field

        # dfl(docnum, fieldname) -> stored field length (0 when no length
        # statistics are available).
        if lengths:
            dfl = lengths.doc_field_length
        else:
            dfl = lambda docnum, fieldname: 0

        # The fieldname of the previous posting
        lastfn = None
        # The bytes text of the previous posting
        lasttext = None
        # The (fieldname, btext) of the previous spelling posting
        lastspell = None
        # The field object for the current field
        fieldobj = None
        for fieldname, btext, docnum, weight, value in items:
            # Check for out-of-order postings. This is convoluted because Python
            # 3 removed the ability to compare a string to None
            if lastfn is not None and fieldname < lastfn:
                raise OutOfOrderError("Field %r .. %r" % (lastfn, fieldname))
            if fieldname == lastfn and lasttext and btext < lasttext:
                raise OutOfOrderError("Term %s:%r .. %s:%r"
                                      % (lastfn, lasttext, fieldname, btext))

            # If the fieldname of this posting is different from the last one,
            # tell the writer we're starting a new field
            if fieldname != lastfn:
                if lasttext is not None:
                    finish_term()
                if lastfn is not None and fieldname != lastfn:
                    finish_field()
                fieldobj = schema[fieldname]
                start_field(fieldname, fieldobj)
                lastfn = fieldname
                lasttext = None

            # HACK: items where docnum == -1 indicate words that should be added
            # to the spelling graph, not the postings
            if docnum == -1:
                spellterm = (fieldname, btext)
                # There can be duplicates of spelling terms, so only add a spell
                # term if it's greater than the last one
                if lastspell is None or spellterm > lastspell:
                    spellword = fieldobj.from_bytes(btext)
                    self.add_spell_word(fieldname, spellword)
                    lastspell = spellterm
                continue

            # If this term is different from the term in the previous posting,
            # tell the writer to start a new term
            if btext != lasttext:
                if lasttext is not None:
                    finish_term()
                start_term(btext)
                lasttext = btext

            # Add this posting
            length = dfl(docnum, fieldname)
            if value is None:
                value = emptybytes
            add(docnum, weight, value, length)

        # Close out whatever term/field was still open when the stream ended.
        if lasttext is not None:
            finish_term()
        if lastfn is not None:
            finish_field()

    @abstractmethod
    def start_field(self, fieldname, fieldobj):
        raise NotImplementedError

    @abstractmethod
    def start_term(self, text):
        raise NotImplementedError

    @abstractmethod
    def add(self, docnum, weight, vbytes, length):
        raise NotImplementedError

    def add_spell_word(self, fieldname, text):
        raise NotImplementedError

    @abstractmethod
    def finish_term(self):
        raise NotImplementedError

    def finish_field(self):
        pass

    def close(self):
        pass
# Postings
class PostingsWriter(object):
    """Abstract writer for a single term's posting list."""

    @abstractmethod
    def start_postings(self, format_, terminfo):
        raise NotImplementedError

    @abstractmethod
    def add_posting(self, id_, weight, vbytes, length=None):
        raise NotImplementedError

    def finish_postings(self):
        pass

    @abstractmethod
    def written(self):
        """Returns True if this object has already written to disk.
        """
        raise NotImplementedError
# Reader classes
class TermsReader(object):
    """Abstract reader for the term dictionary of a segment.

    NOTE(review): ``frequency`` and ``doc_frequency`` are decorated with
    @abstractmethod yet carry concrete bodies; since this class does not
    use ABCMeta, the decorator is documentation-only here and the bodies
    serve as default implementations -- confirm intent before changing.
    """

    @abstractmethod
    def __contains__(self, term):
        raise NotImplementedError

    @abstractmethod
    def terms(self):
        raise NotImplementedError

    @abstractmethod
    def terms_from(self, fieldname, prefix):
        raise NotImplementedError

    @abstractmethod
    def items(self):
        raise NotImplementedError

    @abstractmethod
    def items_from(self, fieldname, prefix):
        raise NotImplementedError

    @abstractmethod
    def term_info(self, fieldname, text):
        raise NotImplementedError

    @abstractmethod
    def frequency(self, fieldname, text):
        return self.term_info(fieldname, text).weight()

    @abstractmethod
    def doc_frequency(self, fieldname, text):
        return self.term_info(fieldname, text).doc_frequency()

    @abstractmethod
    def matcher(self, fieldname, text, format_, scorer=None):
        raise NotImplementedError

    @abstractmethod
    def indexed_field_names(self):
        raise NotImplementedError

    def close(self):
        pass
# Per-doc value reader
class PerDocumentReader(object):
    """Abstract reader for per-document data: deletions, columns, field
    lengths, term vectors, and stored fields."""

    def close(self):
        pass

    @abstractmethod
    def doc_count(self):
        raise NotImplementedError

    @abstractmethod
    def doc_count_all(self):
        raise NotImplementedError

    # Deletions

    @abstractmethod
    def has_deletions(self):
        raise NotImplementedError

    @abstractmethod
    def is_deleted(self, docnum):
        raise NotImplementedError

    @abstractmethod
    def deleted_docs(self):
        raise NotImplementedError

    def all_doc_ids(self):
        """Returns an iterator of all (undeleted) document IDs in the reader.
        """
        is_deleted = self.is_deleted
        return (docnum for docnum in xrange(self.doc_count_all())
                if not is_deleted(docnum))

    def iter_docs(self):
        # Yield (docnum, stored_fields_dict) for every undeleted document.
        for docnum in self.all_doc_ids():
            yield docnum, self.stored_fields(docnum)

    # Columns

    def supports_columns(self):
        return False

    def has_column(self, fieldname):
        return False

    def list_columns(self):
        raise NotImplementedError

    # Don't need to override this if supports_columns() returns False
    def column_reader(self, fieldname, column):
        raise NotImplementedError

    # Bitmaps

    def field_docs(self, fieldname):
        return None

    # Lengths

    @abstractmethod
    def doc_field_length(self, docnum, fieldname, default=0):
        raise NotImplementedError

    @abstractmethod
    def field_length(self, fieldname):
        raise NotImplementedError

    @abstractmethod
    def min_field_length(self, fieldname):
        raise NotImplementedError

    @abstractmethod
    def max_field_length(self, fieldname):
        raise NotImplementedError

    # Vectors

    def has_vector(self, docnum, fieldname):
        return False

    # Don't need to override this if has_vector() always returns False
    def vector(self, docnum, fieldname, format_):
        raise NotImplementedError

    # Stored

    @abstractmethod
    def stored_fields(self, docnum):
        raise NotImplementedError

    def all_stored_fields(self):
        # Must yield stored fields for deleted documents too
        for docnum in xrange(self.doc_count_all()):
            yield self.stored_fields(docnum)
# Segment base class
class Segment(object):
    """Do not instantiate this object directly. It is used by the Index object
    to hold information about a segment. A list of objects of this class are
    pickled as part of the TOC file.

    The TOC file stores a minimal amount of information -- mostly a list of
    Segment objects. Segments are the real reverse indexes. Having multiple
    segments allows quick incremental indexing: just create a new segment for
    the new documents, and have the index overlay the new segment over previous
    ones for purposes of reading/search. "Optimizing" the index combines the
    contents of existing segments into one (removing any deleted documents
    along the way).
    """

    # Extension for compound segment files
    COMPOUND_EXT = ".seg"

    # self.indexname
    # self.segid

    def __init__(self, indexname):
        self.indexname = indexname
        self.segid = self._random_id()
        self.compound = False

    @classmethod
    def _random_id(cls, size=16):
        return random_name(size=size)

    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, self.segment_id())

    def codec(self):
        # Subclasses return the Codec that wrote this segment.
        raise NotImplementedError

    def index_name(self):
        return self.indexname

    def segment_id(self):
        # hasattr guard: segments unpickled from older versions carry a
        # "name" attribute instead of indexname/segid.
        if hasattr(self, "name"):
            # Old segment class
            return self.name
        else:
            return "%s_%s" % (self.index_name(), self.segid)

    def is_compound(self):
        # hasattr guard for segments unpickled from versions that predate
        # the "compound" attribute.
        if not hasattr(self, "compound"):
            return False
        return self.compound

    # File convenience methods

    def make_filename(self, ext):
        return "%s%s" % (self.segment_id(), ext)

    def list_files(self, storage):
        # All files in storage whose names start with "<segment_id>.".
        prefix = "%s." % self.segment_id()
        return [name for name in storage.list() if name.startswith(prefix)]

    def create_file(self, storage, ext, **kwargs):
        """Convenience method to create a new file in the given storage named
        with this segment's ID and the given extension. Any keyword arguments
        are passed to the storage's create_file method.
        """
        fname = self.make_filename(ext)
        return storage.create_file(fname, **kwargs)

    def open_file(self, storage, ext, **kwargs):
        """Convenience method to open a file in the given storage named with
        this segment's ID and the given extension. Any keyword arguments are
        passed to the storage's open_file method.
        """
        fname = self.make_filename(ext)
        return storage.open_file(fname, **kwargs)

    def create_compound_file(self, storage):
        # Combine this segment's individual files into one compound file
        # and delete the originals.
        segfiles = self.list_files(storage)
        assert not any(name.endswith(self.COMPOUND_EXT) for name in segfiles)
        cfile = self.create_file(storage, self.COMPOUND_EXT)
        CompoundStorage.assemble(cfile, storage, segfiles)
        for name in segfiles:
            storage.delete_file(name)
        self.compound = True

    def open_compound_file(self, storage):
        name = self.make_filename(self.COMPOUND_EXT)
        dbfile = storage.open_file(name)
        return CompoundStorage(dbfile, use_mmap=storage.supports_mmap)

    # Abstract methods

    @abstractmethod
    def doc_count_all(self):
        """
        Returns the total number of documents, DELETED OR UNDELETED, in this
        segment.
        """
        raise NotImplementedError

    def doc_count(self):
        """
        Returns the number of (undeleted) documents in this segment.
        """
        return self.doc_count_all() - self.deleted_count()

    def set_doc_count(self, doccount):
        raise NotImplementedError

    def has_deletions(self):
        """
        Returns True if any documents in this segment are deleted.
        """
        return self.deleted_count() > 0

    @abstractmethod
    def deleted_count(self):
        """
        Returns the total number of deleted documents in this segment.
        """
        raise NotImplementedError

    @abstractmethod
    def deleted_docs(self):
        raise NotImplementedError

    @abstractmethod
    def delete_document(self, docnum, delete=True):
        """Deletes the given document number. The document is not actually
        removed from the index until it is optimized.

        :param docnum: The document number to delete.
        :param delete: If False, this undeletes a deleted document.
        """
        raise NotImplementedError

    @abstractmethod
    def is_deleted(self, docnum):
        """
        Returns True if the given document number is deleted.
        """
        raise NotImplementedError

    def should_assemble(self):
        return True
# Wrapping Segment
class WrappingSegment(Segment):
    """A segment that forwards every operation to a wrapped child segment.

    Subclass and override selected methods to alter part of another
    segment's behavior.
    """

    def __init__(self, child):
        # The segment all calls are delegated to.
        self._child = child

    def codec(self):
        return self._child.codec()

    def index_name(self):
        return self._child.index_name()

    def segment_id(self):
        return self._child.segment_id()

    def is_compound(self):
        return self._child.is_compound()

    def should_assemble(self):
        return self._child.should_assemble()

    def make_filename(self, ext):
        return self._child.make_filename(ext)

    def list_files(self, storage):
        return self._child.list_files(storage)

    def create_file(self, storage, ext, **kwargs):
        return self._child.create_file(storage, ext, **kwargs)

    def open_file(self, storage, ext, **kwargs):
        return self._child.open_file(storage, ext, **kwargs)

    def create_compound_file(self, storage):
        return self._child.create_compound_file(storage)

    def open_compound_file(self, storage):
        return self._child.open_compound_file(storage)

    def delete_document(self, docnum, delete=True):
        return self._child.delete_document(docnum, delete=delete)

    def has_deletions(self):
        return self._child.has_deletions()

    def deleted_count(self):
        return self._child.deleted_count()

    def deleted_docs(self):
        return self._child.deleted_docs()

    def is_deleted(self, docnum):
        return self._child.is_deleted(docnum)

    def set_doc_count(self, doccount):
        self._child.set_doc_count(doccount)

    def doc_count(self):
        return self._child.doc_count()

    def doc_count_all(self):
        return self._child.doc_count_all()
# Multi per doc reader
class MultiPerDocumentReader(PerDocumentReader):
    """Presents a list of PerDocumentReaders as a single reader.

    Global document numbers are mapped onto (sub-reader, local docnum)
    pairs using cumulative doc-count offsets.
    """

    def __init__(self, readers, offset=0):
        # NOTE(review): the ``offset`` parameter is accepted but never used.
        self._readers = readers

        # _doc_offsets[i] is the global docnum at which reader i starts.
        self._doc_offsets = []
        self._doccount = 0
        for pdr in readers:
            self._doc_offsets.append(self._doccount)
            self._doccount += pdr.doc_count_all()

        self.is_closed = False

    def close(self):
        for r in self._readers:
            r.close()
        self.is_closed = True

    def doc_count_all(self):
        return self._doccount

    def doc_count(self):
        total = 0
        for r in self._readers:
            total += r.doc_count()
        return total

    def _document_reader(self, docnum):
        # Index of the sub-reader containing the given global docnum.
        return max(0, bisect_right(self._doc_offsets, docnum) - 1)

    def _reader_and_docnum(self, docnum):
        # Translate a global docnum into (reader index, local docnum).
        rnum = self._document_reader(docnum)
        offset = self._doc_offsets[rnum]
        return rnum, docnum - offset

    # Deletions

    def has_deletions(self):
        return any(r.has_deletions() for r in self._readers)

    def is_deleted(self, docnum):
        x, y = self._reader_and_docnum(docnum)
        return self._readers[x].is_deleted(y)

    def deleted_docs(self):
        for r, offset in izip(self._readers, self._doc_offsets):
            for docnum in r.deleted_docs():
                yield docnum + offset

    def all_doc_ids(self):
        for r, offset in izip(self._readers, self._doc_offsets):
            for docnum in r.all_doc_ids():
                yield docnum + offset

    # Columns

    def has_column(self, fieldname):
        return any(r.has_column(fieldname) for r in self._readers)

    def column_reader(self, fieldname, column):
        if not self.has_column(fieldname):
            raise ValueError("No column %r" % (fieldname,))

        # Sub-readers without the column contribute a reader of default
        # values so global docnums still line up.
        default = column.default_value()
        colreaders = []
        for r in self._readers:
            if r.has_column(fieldname):
                cr = r.column_reader(fieldname, column)
            else:
                cr = columns.EmptyColumnReader(default, r.doc_count_all())
            colreaders.append(cr)

        if len(colreaders) == 1:
            return colreaders[0]
        else:
            return columns.MultiColumnReader(colreaders)

    # Lengths

    def doc_field_length(self, docnum, fieldname, default=0):
        x, y = self._reader_and_docnum(docnum)
        return self._readers[x].doc_field_length(y, fieldname, default)

    def field_length(self, fieldname):
        total = 0
        for r in self._readers:
            total += r.field_length(fieldname)
        return total

    # NOTE(review): unlike PerDocumentReader.min_field_length(fieldname) /
    # max_field_length(fieldname), these take no fieldname and call the
    # sub-readers without one -- confirm against actual callers before
    # changing the signatures.
    def min_field_length(self):
        return min(r.min_field_length() for r in self._readers)

    def max_field_length(self):
        return max(r.max_field_length() for r in self._readers)
# Extended base classes
class PerDocWriterWithColumns(PerDocumentWriter):
    """Base class for per-document writers that can store column values.

    Concrete subclasses must set ``_storage``, ``_segment`` and ``_docnum``
    and implement the three abstract column hooks below.
    """

    def __init__(self):
        PerDocumentWriter.__init__(self)
        # Subclasses are responsible for filling these in.
        self._storage = None
        self._segment = None
        self._docnum = None

    @abstractmethod
    def _has_column(self, fieldname):
        """Return True if a column already exists for ``fieldname``."""
        raise NotImplementedError

    @abstractmethod
    def _create_column(self, fieldname, column):
        """Create the backing column for ``fieldname``."""
        raise NotImplementedError

    @abstractmethod
    def _get_column(self, fieldname):
        """Return the writer object for ``fieldname``'s column."""
        raise NotImplementedError

    def add_column_value(self, fieldname, column, value):
        """Record ``value`` for the current document, creating the column lazily."""
        if not self._has_column(fieldname):
            self._create_column(fieldname, column)
        column_writer = self._get_column(fieldname)
        column_writer.add(self._docnum, value)
class CodecWithGraph(Codec):
    """Codec mixin adding support for a per-segment FSA/FST word graph."""

    FST_EXT = ".fst"  # FSA/FST graph file

    def supports_graph(self):
        return True

    def graph_reader(self, storage, segment):
        """Open the segment's word graph; raise NoGraphError if it is absent."""
        from whoosh.automata.fst import GraphReader
        from whoosh.reading import NoGraphError

        graph_filename = segment.make_filename(self.FST_EXT)
        if not storage.file_exists(graph_filename):
            raise NoGraphError
        return GraphReader(storage.open_file(graph_filename))
class FieldWriterWithGraph(FieldWriter):
    """Field writer mixin that maintains an FSA/FST graph of spellable words."""

    FST_EXT = CodecWithGraph.FST_EXT

    def __init__(self):
        FieldWriter.__init__(self)
        # Subclasses are responsible for filling these in.
        self._storage = None
        self._segment = None
        self._fieldname = None
        self._fieldobj = None

    def _prep_graph(self):
        # Lazily create the graph writer the first time a field needs one.
        from whoosh.automata.fst import GraphWriter

        graph_file = self._segment.create_file(self._storage, self.FST_EXT)
        self._gwriter = GraphWriter(graph_file)

    def _start_graph_field(self, fieldname, fieldobj):
        spelling = fieldobj.spelling
        separate = fieldobj.separate_spelling()
        self._needs_graph = spelling or separate
        # Auto-insert terms only when spelling comes from the indexed terms
        # themselves rather than from a separate word list.
        self._auto_graph = spelling and not separate

        if self._needs_graph:
            if getattr(self, "_gwriter", None) is None:
                self._prep_graph()
            self._gwriter.start_field(fieldname)

    def _insert_graph_key(self, btext):
        if self._auto_graph:
            word = self._fieldobj.from_bytes(btext)
            self.add_spell_word(self._fieldname, word)

    def add_spell_word(self, fieldname, word):
        assert fieldname == self._fieldname
        assert isinstance(word, text_type)
        self._gwriter.insert(word)

    def _finish_graph_field(self):
        if self._needs_graph:
            self._gwriter.finish_field()

    def _close_graph(self):
        if getattr(self, "_gwriter", None):
            self._gwriter.close()
| 29.287574 | 81 | 0.656255 |
3db54d99929d6bb7149ed5da34e65451660abd6b | 382 | py | Python | custom_components/smthost/const.py | kdeyev/smthost | d144282a15e0d3bbdbb58dffbe916d1472fdcb12 | [
"MIT"
] | null | null | null | custom_components/smthost/const.py | kdeyev/smthost | d144282a15e0d3bbdbb58dffbe916d1472fdcb12 | [
"MIT"
] | null | null | null | custom_components/smthost/const.py | kdeyev/smthost | d144282a15e0d3bbdbb58dffbe916d1472fdcb12 | [
"MIT"
] | null | null | null | """Constants for the Smart Meter Texas integration."""
from datetime import timedelta
# How often the integration polls for new meter data.
SCAN_INTERVAL = timedelta(hours=1)
DEBOUNCE_COOLDOWN = 1800  # Seconds

# Keys used in hass.data for this integration.
DATA_COORDINATOR = "coordinator"
DATA_SMART_METER = "smart_meter_data"

DOMAIN = "smthost"

# Entity attribute names.
METER_NUMBER = "meter_number"
ESIID = "electric_service_identifier"
LAST_UPDATE = "last_updated"

ELECTRIC_METER = "Electric Meter"
ddfc0a232ee6c9c657689b46a45c2b731ce21c75 | 1,018 | py | Python | benchmark/graphics.py | Algorithms-and-Data-Structures-2021/splay-tree | a5f9c2a1bed80ced7f5e1249c5bebb7d16fc1747 | [
"MIT"
] | null | null | null | benchmark/graphics.py | Algorithms-and-Data-Structures-2021/splay-tree | a5f9c2a1bed80ced7f5e1249c5bebb7d16fc1747 | [
"MIT"
] | null | null | null | benchmark/graphics.py | Algorithms-and-Data-Structures-2021/splay-tree | a5f9c2a1bed80ced7f5e1249c5bebb7d16fc1747 | [
"MIT"
] | 2 | 2021-04-10T15:48:24.000Z | 2021-06-02T13:33:59.000Z | import matplotlib.pyplot as plt
if __name__ == '__main__':
plt.figure(figsize=(10, 10))
plt.xlabel('elements')
plt.ylabel('time(µs)')
operation = str(input())
ox = [100, 500, 1000, 5000, 10000, 25000, 50000, 100000, 250000, 500000, 1000000, 2500000, 5000000]
if operation == 'remove':
oy = [12.57, 86.12, 217.07, 1428.04, 3358.01, 10402.94, 24529.08, 62695.43, 234452.42, 598181.25, 1530410.00,
5012860.00, 12045210.00]
plt.title('remove')
plt.plot(ox, oy)
if operation == 'insert':
oy = [58.75, 242.61, 462.58, 2633.95, 5439.71, 14756.11, 35263.11, 79928.70, 253441.84, 607762.14,
1403230.00, 4137390.00, 8561870.00]
plt.title('insert')
plt.plot(ox, oy)
if operation == 'search':
oy = [71.69, 508.86, 613.16, 2935.28, 6307.20, 18557.58, 43231.63, 101145.21, 309374.37, 309374.37,
1579580.00, 4310460.00, 10076320.00]
plt.title('search')
plt.plot(ox, oy)
plt.show()
| 36.357143 | 117 | 0.584479 |
b28eda635677a8a26136d10af98bee1909f605d0 | 201 | py | Python | cat_blog/users/tests/test_models.py | turamant/cat_blog | 0a04978db78d805f6468626ab23454ffa52b2411 | [
"MIT"
] | null | null | null | cat_blog/users/tests/test_models.py | turamant/cat_blog | 0a04978db78d805f6468626ab23454ffa52b2411 | [
"MIT"
] | 4 | 2021-03-30T14:29:55.000Z | 2021-06-10T19:56:22.000Z | cat_blog/users/tests/test_models.py | turamant/cat_blog | 0a04978db78d805f6468626ab23454ffa52b2411 | [
"MIT"
] | null | null | null | import pytest
from cat_blog.users.models import User
pytestmark = pytest.mark.django_db
def test_user_get_absolute_url(user: User):
assert user.get_absolute_url() == f"/users/{user.username}/"
| 20.1 | 64 | 0.771144 |
3e74407fc2adfd224e351d138c76605e15a43d67 | 560 | py | Python | checkk2fov/test.py | danxhuber/k2epic | 743587da578f187a6c069fbe02e5d4a5cadd3a98 | [
"MIT"
] | 2 | 2015-11-25T05:03:05.000Z | 2016-02-09T03:56:05.000Z | checkk2fov/test.py | danxhuber/k2epic | 743587da578f187a6c069fbe02e5d4a5cadd3a98 | [
"MIT"
] | null | null | null | checkk2fov/test.py | danxhuber/k2epic | 743587da578f187a6c069fbe02e5d4a5cadd3a98 | [
"MIT"
] | null | null | null | import pdb
import pdb

import numpy as np
import matplotlib.pyplot as plt

# Path to the EPIC C7 delivery catalogue (pipe-delimited merge file).
# Fix: the path literal was previously duplicated for the two passes.
CATALOG_PATH = '/Users/daniel/science/K2/EPIC/deliveries/d14260_03_epic_c7_dmc.mrg'

# First pass: count the catalogue rows so the coordinate arrays can be
# pre-allocated.  Fix: use a context manager (the second handle was never
# closed in the original) and let Python do the counting.
with open(CATALOG_PATH, 'r') as f:
    n = sum(1 for _ in f)
print(n)

ra = np.zeros(n)
dec = np.zeros(n)

# Second pass: sample every 10000th row and pull RA/Dec (pipe-delimited
# columns 9 and 10).
with open(CATALOG_PATH, 'r') as f:
    for i, line in enumerate(f):
        if i % 10000 != 0:
            continue
        print(i)  # progress output for each sampled row
        columns = line.strip().split("|")
        ra[i] = columns[9]
        dec[i] = columns[10]

# Keep only the sampled rows (RA is positive for real entries).
use = np.where(ra > 0)[0]
ra = ra[use]
dec = dec[use]

# Drop into the debugger to inspect the sampled coordinates interactively.
pdb.set_trace()
5d22d9de6bee2f407431aedcde7842a3b4d42fc8 | 2,190 | py | Python | lammps-master/tools/i-pi/ipi/utils/softexit.py | rajkubp020/helloword | 4bd22691de24b30a0f5b73821c35a7ac0666b034 | [
"MIT"
] | null | null | null | lammps-master/tools/i-pi/ipi/utils/softexit.py | rajkubp020/helloword | 4bd22691de24b30a0f5b73821c35a7ac0666b034 | [
"MIT"
] | null | null | null | lammps-master/tools/i-pi/ipi/utils/softexit.py | rajkubp020/helloword | 4bd22691de24b30a0f5b73821c35a7ac0666b034 | [
"MIT"
] | null | null | null | """Utility functions for killing the wrapper softly.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http.//www.gnu.org/licenses/>.
Classes:
Softexit: Concise class to manage cleaning up in case of an emergency exit.
"""
import traceback, sys
from ipi.utils.messages import verbosity, warning
__all__ = ['Softexit', 'softexit']
class Softexit(object):
    """Manage a controlled early shutdown of the simulation.

    Other components register cleanup callbacks (e.g. closing the socket
    interface); when an early exit is requested — via SIGTERM or an EXIT
    file in the run directory — trigger() runs every callback and then
    terminates the process.

    Attributes:
       flist: The list of registered cleanup callbacks.
    """

    def __init__(self):
        """Start with no registered cleanup callbacks."""
        self.flist = []

    def register(self, func):
        """Register a cleanup callback to be run on soft exit.

        Args:
           func: The callable to add to flist.
        """
        self.flist.append(func)

    def trigger(self, message=""):
        """Stop the simulation cleanly.

        Optionally prints a warning message, runs every registered cleanup
        callback, then exits the interpreter.

        Args:
           message: Optional text explaining why the exit was requested.
        """
        if message != "":
            warning("Soft exit has been requested with message: '" + message + "'. Cleaning up.", verbosity.low)
        for cleanup in self.flist:
            cleanup()
        sys.exit()
softexit = Softexit()
| 29.594595 | 109 | 0.702283 |
f348f160690f15909a2a1fbe1f11e2014f938f2d | 1,970 | py | Python | src/sentry/utils/pytest/kafka.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | src/sentry/utils/pytest/kafka.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/utils/pytest/kafka.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import os
import pytest
import six
from confluent_kafka.admin import AdminClient
from confluent_kafka import Producer
@pytest.fixture
def kafka_producer():
    """Fixture returning a factory that builds a Producer from settings."""

    def factory(settings):
        bootstrap = settings.KAFKA_CLUSTERS["default"]["bootstrap.servers"]
        return Producer({"bootstrap.servers": bootstrap})

    return factory
class _KafkaAdminWrapper:
    """Thin convenience wrapper around confluent-kafka's AdminClient."""

    def __init__(self, request, settings):
        self.test_name = request.node.name

        kafka_config = {key: val for key, val in six.iteritems(settings.KAFKA_CLUSTERS["default"])}
        self.admin_client = AdminClient(kafka_config)

    def delete_topic(self, topic_name):
        """Delete a topic, silently ignoring failures (e.g. topic absent)."""
        try:
            futures_dict = self.admin_client.delete_topics([topic_name])
            self._sync_wait_on_result(futures_dict)
        except Exception:  # noqa
            pass  # noqa nothing to do (probably there was no topic to start with)

    def _sync_wait_on_result(self, futures_dict):
        """Synchronously wait on all futures returned by the admin_client api.

        :param futures_dict: the api returns a dict of futures that can be awaited
        """
        # wait (up to 5 seconds each) for every pending admin operation
        for future in futures_dict.values():
            future.result(5)
@pytest.fixture
def kafka_admin(request):
    """Fixture returning a factory for :class:`_KafkaAdminWrapper`.

    :param request: the pytest request
    :return: a factory producing a Kafka admin wrapper
    """

    def factory(settings):
        return _KafkaAdminWrapper(request, settings)

    return factory
@pytest.fixture
def requires_kafka():
    """Skip/xfail tests that need a reachable Kafka broker."""
    pytest.importorskip("confluent_kafka")

    if os.environ.get("SENTRY_KAFKA_HOSTS") is None:
        pytest.xfail("test requires SENTRY_KAFKA_HOSTS environment variable which is not set")
| 28.550725 | 94 | 0.68731 |
b2422f1640b08bc71a86eca401d548aeed451431 | 2,242 | py | Python | projeto-02/dijkstra/dijkstra_binary_heap.py | henrique-tavares/IFB-Analise-de-Algoritmos | 36db7672fea45ce8ab9dce5bbe41aec30be18465 | [
"MIT"
] | null | null | null | projeto-02/dijkstra/dijkstra_binary_heap.py | henrique-tavares/IFB-Analise-de-Algoritmos | 36db7672fea45ce8ab9dce5bbe41aec30be18465 | [
"MIT"
] | null | null | null | projeto-02/dijkstra/dijkstra_binary_heap.py | henrique-tavares/IFB-Analise-de-Algoritmos | 36db7672fea45ce8ab9dce5bbe41aec30be18465 | [
"MIT"
] | 1 | 2021-07-15T23:50:37.000Z | 2021-07-15T23:50:37.000Z | from collections import defaultdict
from typing import Dict, List, Set, Tuple
import heapq
import sys
sys.path.append("../../projeto-02")
from graph import Graph
class DijkstraBinaryHeap:
def __init__(self, graph: Graph) -> None:
self.graph = graph
self.paths: Dict[str, Dict[str, Tuple[str, float]]] = defaultdict(dict)
def calculate_shortest_paths(self, source: str) -> Dict[str, Tuple[str, float]]:
if source not in self.graph:
raise ValueError(f"vertice {source} not found in graph {self.graph}")
paths: Dict[str, Tuple[str, float]] = defaultdict(lambda: ("", float("inf")))
paths[source] = ("", 0)
vertices_queue: List[Tuple[float, str]] = list()
visited_vertices: Set[str] = set()
heapq.heappush(vertices_queue, (0, source))
qtd_vertices = len(self.graph.elements.keys())
while len(visited_vertices) < qtd_vertices:
accumulated_distance, current_vertice = heapq.heappop(vertices_queue)
if current_vertice in visited_vertices:
continue
for (next_vertice, distance) in self.graph[current_vertice].items():
new_distance = accumulated_distance + distance
old_distance = paths[next_vertice][1]
if new_distance < old_distance:
paths[next_vertice] = current_vertice, new_distance
heapq.heappush(vertices_queue, (new_distance, next_vertice))
visited_vertices.add(current_vertice)
self.paths[source] = dict(paths)
return dict(paths)
def build_path(self, source: str, destination: str) -> str:
if source not in self.paths:
raise ValueError(f"There are no paths calculated from source vertice: {source}")
if destination not in self.paths[source]:
raise ValueError(f"Destination: {destination} unreacheable from source: {source}")
path: List[str] = list()
current_step = destination
while current_step != "":
path.append(f"{current_step} ({self.paths[source][current_step][1]})")
current_step = self.paths[source][current_step][0]
return " -> ".join(path[::-1])
| 36.16129 | 94 | 0.632025 |
5204b88d5cc7829d01573514a9d849fc667bfdd3 | 4,930 | py | Python | SecondaryStructures.py | LifeWorks/SIEVE | 8ba16402f0a15a0ae040d54cf2674e7d00ef4be0 | [
"BSD-2-Clause"
] | null | null | null | SecondaryStructures.py | LifeWorks/SIEVE | 8ba16402f0a15a0ae040d54cf2674e7d00ef4be0 | [
"BSD-2-Clause"
] | null | null | null | SecondaryStructures.py | LifeWorks/SIEVE | 8ba16402f0a15a0ae040d54cf2674e7d00ef4be0 | [
"BSD-2-Clause"
] | null | null | null | # Usage: python3 SecondaryStructures.py -i Fastas/ --cpu 4 --parallel 2
# (run 2 parallel instances of Porter5 on 4 cores - total of 8 cores)
import os
import sys
import argparse
from multiprocessing import Pool
import pandas as pd
### parallel code ##
def splitFa(filename, faDir):
    """Split a (possibly multi-record) FASTA file into one file per sequence.

    Each record is written to ``<faDir>/<id><j>.fasta`` where ``<id>`` is the
    first token of the header line and ``j`` is the smallest integer that does
    not collide with an existing file.  The sequence is re-flowed onto a
    single line.

    :param filename: path of the input FASTA file
    :param faDir: directory receiving the per-sequence FASTA files
    """
    # catch big fasta; fix: the original open() handle was never closed
    with open(filename, "r") as handle:
        fasta = handle.readlines()

    # fix formatting
    i = 0
    while i < len(fasta):
        pid = fasta[i].replace(">", "").strip().split()[0]

        # pick a non-colliding output name: <pid>0.fasta, <pid>1.fasta, ...
        j = 0
        while os.path.isfile(faDir + '/' + pid + str(j) + ".fasta"):
            j += 1

        # collapse the record's sequence lines into a single line
        aa = ">" + pid + "\n"
        i += 1
        while i < len(fasta) and fasta[i][0] != ">":
            aa = aa + fasta[i].strip()
            i += 1

        with open(faDir + '/' + pid + str(j) + ".fasta", "w") as out:
            out.write(aa + "\n")
def loop(line):
    """Run Porter5 on one FASTA file and reformat its ss3/ss8 predictions.

    Builds the Porter5 command from the global CLI options (``args``) and
    ``executable``, then rewrites each prediction table (``.ss3``/``.ss8``)
    as a FASTA-style file next to the input.

    :param line: path of the FASTA file to predict
    """
    # Assemble the command once instead of enumerating every flag
    # combination in separate branches (the original had four near-identical
    # os.system calls).
    cmd = 'python3 %s -i %s --cpu %d' % (executable, line, args.cpu)
    if args.fast:
        cmd += ' --fast'
    if args.tmp:
        cmd += ' --tmp'
    os.system(cmd)

    # Fix: close the input file handle (the original leaked it).
    with open(line, "r") as fh:
        fasta = fh.readlines()

    for s in (".ss3", ".ss8"):
        filename = line + s
        if os.path.isfile(filename):
            # Join the per-residue SS column into one string.
            ss = "".join(pd.read_csv(filename, index_col=0, sep='\t')
                         ["SS"].to_list())
            with open(line[:-6] + s + line[-6:], 'w') as f:
                f.write(fasta[0].strip() + '\n' + ss)
        else:
            print(filename + " dose not exist! Check the problem")
# set argparse
parser = argparse.ArgumentParser(description="This is the standalone of Porter5 for multiple inputs. It is sufficient to specify a directory containing FASTA files to start the prediction of their Secondary Structure in 3- and 8-classes. It is also possible to run multiple predictions in parallel (TOTAL cpu = --cpu x --parallel). Please run Porter5.py if you have only 1 protein sequence to predict.",
                                 epilog="E.g., to run 2 instances of Porter5 on 4 cores (total of 8 cores): python3 multiple_fasta.py -i Fastas/ --cpu 4 --parallel 2")
parser.add_argument("-i", type=str, nargs=1,
                    help="Indicate the directory containing the FASTA files.")
parser.add_argument("--cpu", type=int, default=1,
                    help="Specify how many cores to assign to each prediction.")
parser.add_argument("--parallel", type=int, default=1,
                    help="Specify how many instances to run in parallel.")
parser.add_argument(
    "--fast", help="Use only HHblits (skipping PSI-BLAST) to perform a faster prediction.", action="store_true")
parser.add_argument(
    "--tmp", help="Leave output files of HHblits and PSI-BLAST, i.e. log, hhr, psi, chk, and blastpgp files.", action="store_true")
parser.add_argument(
    "--setup", help="Initialize Porter5 from scratch. Run it when there has been any change involving PSI-BLAST, HHblits, Porter itself, etc.", action="store_true")
args = parser.parse_args()

# check arguments
if not args.i:
    print("Usage: python3 " +
          sys.argv[0]+" -i <fasta_dir> [--cpu CPU_number] [--parallel instances] [--fast]\n--help for the full list of commands")
    exit()

# initialization variables
executable = os.path.abspath(
    os.path.dirname(sys.argv[0]))+"/Porter5/Porter5.py"
if not os.path.isfile(executable):
    print("\n---->>No executable retrieved at", executable)
    exit()
if not os.path.isdir("".join(args.i)):
    print("\n---->>", "".join(args.i),
          "isn't a directory! Please consider running split_fasta.py.")
    exit()

# (re)run the Porter5 setup when requested or when its config is missing
if not os.path.isfile(os.path.abspath(os.path.dirname(sys.argv[0]))+"/Porter5/scripts/config.ini") or args.setup:
    os.system("python3 %s --setup" % executable)

# create new directory to save outcome
faDir = os.path.abspath("".join(args.i)) + "/fastas"
if not os.path.exists(faDir):
    os.makedirs(faDir)

# fetch all the inputs from the passed directory and split multi-record
# FASTA files into one file per sequence
os.chdir("".join(args.i))
for filepath in os.listdir(os.getcwd()):
    if filepath.endswith(".fasta") or filepath.endswith(".fa"):
        splitFa(filepath, faDir)

# light the bomb // launch the parallel code; biggest files first so the
# slowest predictions start earliest
os.chdir(faDir)
files = []
for filepath in os.listdir(os.getcwd()):
    if filepath.endswith(".fasta"):
        files.append(filepath)
sorted_files = sorted(files, key=os.path.getsize, reverse=True)

if args.parallel > 1:
    with Pool(args.parallel) as p:
        p.map(loop, sorted_files, 1)
else:
    # BUG FIX: the original called loop(sorted_files), passing the whole
    # list where loop() expects a single filename.  Run each file in turn.
    for filepath in sorted_files:
        loop(filepath)
| 37.923077 | 403 | 0.618053 |
f005871fce1b4cf064d1967c542cd2f61a7a733d | 1,448 | py | Python | string_matcher.py | nacbotics5/web-scraping-with-python | 5c5d89d58173ee2e6491283d7d5ba0a413d6961c | [
"BSD-3-Clause"
] | 3 | 2019-07-03T13:10:21.000Z | 2020-01-09T10:34:12.000Z | string_matcher.py | nacbotics5/web-scraping-with-python | 5c5d89d58173ee2e6491283d7d5ba0a413d6961c | [
"BSD-3-Clause"
] | null | null | null | string_matcher.py | nacbotics5/web-scraping-with-python | 5c5d89d58173ee2e6491283d7d5ba0a413d6961c | [
"BSD-3-Clause"
] | 1 | 2021-11-08T18:53:12.000Z | 2021-11-08T18:53:12.000Z | #-*-coding:utf8;-*-
from collections import OrderedDict
class string_match(object):
def __init__(self,data_source=None):
self.dictx,self.dicty,self.lists = dict(),dict(),data_source
self.boolean = False
def match_string(self,x,y,prints=False):
" returns the match of x to y as a percentage "
a = set(x.split())
b = set(y.split())
c = float(len(a&b))
d = float(len(a|b))
try:similarity_ratio = round(((c/d)*100/1),2)
except:similarity_ratio = 0
if similarity_ratio >= 1:self.dictx[y] = similarity_ratio
else:pass
if prints:
print(x,y,similarity_ratio)
else:pass
return(similarity_ratio)
def find_match(self,args,func,data_source,prints=False):
for id,string in enumerate(data_source):
func(args,string)
dictz = list(OrderedDict(sorted(self.dictx.items(), key=lambda t: t[1], reverse=True)))
dicts = list(OrderedDict(sorted(self.dicty.items(), key=lambda t: t[1], reverse=True)))
try:
test = dictz[0]
for id,string in enumerate(data_source):
if func(test,string,prints) >=100:
self.dictx = dict()
return((id,string))
else:pass
except IndexError as e:
self.dictx = dict()
return None
self.dictx = dict()
| 32.909091 | 95 | 0.562155 |
5f60726dd8bde8c73c9cc8d538725c9152ce8045 | 13,037 | py | Python | datadrift/detect_drift.py | samsonq/datadrift | c4298d588b5df27992f332c1df5b092c54823b4e | [
"MIT"
] | null | null | null | datadrift/detect_drift.py | samsonq/datadrift | c4298d588b5df27992f332c1df5b092c54823b4e | [
"MIT"
] | null | null | null | datadrift/detect_drift.py | samsonq/datadrift | c4298d588b5df27992f332c1df5b092c54823b4e | [
"MIT"
] | null | null | null | #################################################
# Encapsulates GE Test Suite Creation Functions #
# #
#################################################
"""
Automate process to generate test suite and expectations for dataset, and then create checkpoints to evaluate datasets. These functions encapsulate the
code/process that is run on the notebooks, to make the process of expectation creation and evaluation much simpler, user-friendly, and efficient for
comparing many datasets.
Generates JSON expectations file in '/validations' folder that contains tests and results. Also creates static HTML dashboard that visualizes
expectations and results of new data compared with old data. Please see the 'main' function below as well as the parameters to specify when running.
"""
import os
import sys
from tqdm import tqdm
import numpy as np
import pandas as pd
import datetime
import argparse
import great_expectations as ge
import great_expectations.jupyter_ux
from great_expectations.core.batch import BatchRequest
from great_expectations.profile.user_configurable_profiler import UserConfigurableProfiler
from great_expectations.checkpoint import SimpleCheckpoint
from great_expectations.exceptions import DataContextError
from great_expectations.cli.datasource import sanitize_yaml_and_save_datasource, check_if_datasource_name_exists
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.data_context.types.resource_identifiers import ExpectationSuiteIdentifier
# from ruamel.yaml import YAML
# yaml = YAML()
import yaml
from pprint import pprint
# sys.path.insert("./custom_expectations", 0)
from custom_expectations import categorical_expectations, continuous_expectations, aggregate_expectations, \
custom_expectations, utils # expectations
from parse_results import parse_ge_results # parsing GE JSON result outputs
import warnings
warnings.filterwarnings("ignore")
'''
def parse_args():
"""
Get arguments to run Great Expectations tests.
:return: program arguments
"""
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--datasource_name", type=str, default="data_drift",
help="Name of Data Source")
ap.add_argument("-n", "--datasource_path", type=str, default="data_drift_detection",
help="Name of Expectation Suite")
ap.add_argument("-n", "--overwrite", type=str, default="data_drift_detection",
help="Name of Expectation Suite")
ap.add_argument("-n", "--expectation_suite_name", type=str, default="data_drift_detection",
help="Name of Expectation Suite")
ap.add_argument("-n", "--expectation_data", type=str, default="data_drift_detection",
help="Name of Expectation Suite")
ap.add_argument("-n", "--data_docs", type=str, default="data_drift_detection",
help="Name of Expectation Suite")
ap.add_argument("-n", "--checkpoint_name", type=str, default="data_drift_detection",
help="Name of Expectation Suite")
ap.add_argument("-n", "--checkpoint_data", type=str, default="data_drift_detection",
help="Name of Expectation Suite")
return vars(ap.parse_args())
'''
def create_ge_datasource(datasource_name="data_drift", data_path="../data", overwrite=True):
    """
    Step 1 of the Great Expectations process to create a new datasource with the main data.
    :param datasource_name: name of new datasource
    :param data_path: path to data source
    :param overwrite: boolean to overwrite datasource if existing
    :return: GE datasources object, or None on failure
    """
    context = ge.get_context()
    # Renamed from `yaml` so the local variable no longer shadows the
    # module-level `import yaml`.
    datasource_yaml = f"""
    name: {datasource_name}
    class_name: Datasource
    execution_engine:
        class_name: PandasExecutionEngine
    data_connectors:
        default_inferred_data_connector_name:
            class_name: InferredAssetFilesystemDataConnector
            base_directory: {data_path}
            default_regex:
                group_names:
                    - data_asset_name
                pattern: (.*)
        default_runtime_data_connector_name:
            class_name: RuntimeDataConnector
            batch_identifiers:
                - default_identifier_name
    """
    try:
        yaml_result = context.test_yaml_config(yaml_config=datasource_yaml)
        assert len(yaml_result.get_available_data_asset_names()[
                       "default_inferred_data_connector_name"]) > 0, "No data sources available."
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; failure remains best-effort.
        print("Failed to create new GE datasource.")
        return

    if check_if_datasource_name_exists(context, datasource_name=datasource_name):
        if overwrite:
            sanitize_yaml_and_save_datasource(context, datasource_yaml, overwrite_existing=True)
        else:
            print("Data source {} already exists. Set overwrite=True to overwrite data source.".format(datasource_name))
            return
    else:
        sanitize_yaml_and_save_datasource(context, datasource_yaml, overwrite_existing=False)

    return context.list_datasources()
def create_ge_expectations_suite(expectation_suite_name="data_drift_detection",
                                 datasource_name="data_drift",
                                 dataset_name="example_data.csv",
                                 categorical_variables=["Prior_Claims"],
                                 continuous_variables=["Age", "Income"],
                                 data_docs=True):
    """
    Step 2 of the Great Expectations process to create a new expectation suite on a GE data source.
    :param expectation_suite_name: name of expectation suite to create
    :param datasource_name: name of datasource
    :param dataset_name: data asset the expectations are profiled against
    :param categorical_variables: list of categorical variables to create expectations for
    :param continuous_variables: list of continuous variables to create expectations for
    :param data_docs: boolean of whether to send results to GE Data Docs
    :return: number of expectations created for datasource
    """
    # NOTE: the list defaults are shared between calls but are only iterated,
    # never mutated, so this is safe.
    context = ge.data_context.DataContext()
    batch_request = {'datasource_name': datasource_name,
                     'data_connector_name': 'default_inferred_data_connector_name',
                     'data_asset_name': dataset_name,
                     'limit': 1000}

    # Reuse an existing suite when present, otherwise create a fresh one.
    try:
        suite = context.get_expectation_suite(expectation_suite_name=expectation_suite_name)
        print(
            f'Loaded ExpectationSuite "{suite.expectation_suite_name}" containing {len(suite.expectations)} expectations.')
    except DataContextError:
        suite = context.create_expectation_suite(expectation_suite_name=expectation_suite_name)
        print(f'Created ExpectationSuite "{suite.expectation_suite_name}".')

    validator = context.get_validator(batch_request=BatchRequest(**batch_request),
                                      expectation_suite_name=expectation_suite_name)

    expectation_count = 0

    ### Create Data Expectations ###

    ## Table Level Aggregate Expectations ##
    aggregate = aggregate_expectations.AggregateExpectations(validator)
    for expectation_config in aggregate.get_expectations():
        suite.add_expectation(expectation_configuration=expectation_config)
        expectation_count += 1

    ## Categorical Variables Expectations ##
    # Distributional Expectations #
    for categorical_var in categorical_variables:
        categorical = categorical_expectations.CategoricalExpectations(validator, categorical_var)
        for expectation_config in categorical.get_expectations():
            suite.add_expectation(expectation_configuration=expectation_config)
            expectation_count += 1

    ## Continuous Variables Expectations ##
    # Distributional Expectations #
    for continuous_var in continuous_variables:
        continuous = continuous_expectations.ContinuousExpectations(validator, continuous_var)
        # BUG FIX: the original iterated `categorical.get_expectations()` here,
        # re-adding the last categorical variable's expectations instead of the
        # continuous ones (and raising NameError when categorical_variables
        # was empty).
        for expectation_config in continuous.get_expectations():
            suite.add_expectation(expectation_configuration=expectation_config)
            expectation_count += 1

    context.save_expectation_suite(expectation_suite=suite, expectation_suite_name=expectation_suite_name)
    print(context.get_expectation_suite(expectation_suite_name=expectation_suite_name))
    print("{} expectations were created!".format(expectation_count))

    if data_docs:
        suite_identifier = ExpectationSuiteIdentifier(expectation_suite_name=expectation_suite_name)
        context.build_data_docs(resource_identifiers=[suite_identifier])
        context.open_data_docs(resource_identifier=suite_identifier)

    return expectation_count
def create_ge_checkpoint(checkpoint_name="checkpoint",
                         expectation_suite_name="data_drift_detection",
                         datasource_name="data_drift",
                         new_dataset_name="example_data_for_validation.csv",
                         data_docs=True):
    """
    Step 3 of the Great Expectations process to introduce a new dataset and run/validate the data through previously created expectations.
    :param checkpoint_name: name of checkpoint to create
    :param expectation_suite_name: name of expectation suite to validate against
    :param datasource_name: name of datasource
    :param new_dataset_name: name of new dataset to validate and create checkpoint with
    :param data_docs: boolean of whether to send checkpoint results to GE Data Docs
    :return: checkpoint configuration string, or None if checkpoint creation failed
    """
    context = ge.get_context()
    yaml_config = f"""
    name: {checkpoint_name}
    config_version: 1.0
    class_name: SimpleCheckpoint
    run_name_template: "%Y%m%d-%H%M%S-my-run-name-template"
    validations:
      - batch_request:
          datasource_name: {datasource_name}
          data_connector_name: default_inferred_data_connector_name
          data_asset_name: {new_dataset_name}
          data_connector_query:
            index: -1
        expectation_suite_name: {expectation_suite_name}
    """
    pprint(context.get_available_data_asset_names())  # print available datasources and expectation suites

    try:
        checkpoint = context.test_yaml_config(yaml_config=yaml_config)
    except Exception:
        # BUG FIX: narrowed from a bare `except:`; also return early — the
        # original fell through and raised NameError on the undefined
        # `checkpoint` variable.
        print("Failed to create GE checkpoint.")
        return None

    checkpoint_config = checkpoint.get_substituted_config().to_yaml_str()
    print(checkpoint_config)  # print checkpoint config

    # BUG FIX: yaml.load() without an explicit Loader is deprecated and
    # unsafe; safe_load parses this plain-data config identically.
    context.add_checkpoint(**yaml.safe_load(yaml_config))  # save checkpoint

    if data_docs:
        context.run_checkpoint(checkpoint_name=checkpoint_name)
        context.open_data_docs()

    print("Done creating checkpoint {}.".format(checkpoint_name))
    return checkpoint_config
def detect_drift():
    """Run the full GE data-drift workflow driven by ``config.yaml``.

    Creates the datasource, builds the expectation suite, runs a checkpoint
    against the validation dataset, then parses the newest GE validation
    JSON into the configured output path.
    """
    # BUG FIX: the original `yaml.load(open(...))` leaked the file handle
    # and used the deprecated/unsafe loader; use a context manager and
    # safe_load instead.
    with open("config.yaml", "r") as config_file:
        ge_config = yaml.safe_load(config_file)

    # 1. Create Data Source
    create_ge_datasource(datasource_name=ge_config["datasource_name"],
                         data_path=ge_config["datasource_path"],
                         overwrite=ge_config["overwrite"])

    # 2. Create Expectation Suite
    create_ge_expectations_suite(expectation_suite_name=ge_config["expectation_suite_name"],
                                 datasource_name=ge_config["datasource_name"],
                                 dataset_name=ge_config["expectation_data"],
                                 categorical_variables=ge_config["categorical_variables"],
                                 continuous_variables=ge_config["continuous_variables"],
                                 data_docs=ge_config["data_docs"])

    # 3. Create Checkpoint
    create_ge_checkpoint(checkpoint_name=ge_config["checkpoint_name"],
                         expectation_suite_name=ge_config["expectation_suite_name"],
                         datasource_name=ge_config["datasource_name"],
                         new_dataset_name=ge_config["checkpoint_data"],
                         data_docs=ge_config["data_docs"])

    # Parse GE Results
    def newest(path):
        # most recently created entry inside `path` (PEP 8: def, not a
        # lambda bound to a name)
        return max([os.path.join(path, basename) for basename in os.listdir(path)], key=os.path.getctime)

    # validations are nested run-name/run-time/asset directories; descend to
    # the newest leaf JSON
    path_to_json = newest(newest(newest(os.path.join("./great_expectations", "uncommitted", "validations", ge_config["expectation_suite_name"]))))
    parse_ge_results(path=path_to_json,
                     save_path=ge_config["parsed_validations_path"])
if __name__ == "__main__":
detect_drift()
| 48.107011 | 171 | 0.701158 |
0fc4255615e9d122ce52cf76a1b4fbe5d38ff0af | 6,812 | py | Python | homeassistant/components/remote/__init__.py | dummys/home-assistant | dd908caebade15adf061fade686355b94ed2f43a | [
"Apache-2.0"
] | 11 | 2018-02-16T15:35:47.000Z | 2020-01-14T15:20:00.000Z | homeassistant/components/remote/__init__.py | dummys/home-assistant | dd908caebade15adf061fade686355b94ed2f43a | [
"Apache-2.0"
] | 70 | 2020-07-23T07:13:50.000Z | 2022-03-31T06:01:52.000Z | homeassistant/components/remote/__init__.py | dummys/home-assistant | dd908caebade15adf061fade686355b94ed2f43a | [
"Apache-2.0"
] | 6 | 2018-02-04T03:48:55.000Z | 2022-01-24T20:37:04.000Z | """Support to interface with universal remote control devices."""
from __future__ import annotations
from collections.abc import Iterable
from datetime import timedelta
import functools as ft
import logging
from typing import Any, cast, final
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_COMMAND,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
make_entity_service_schema,
)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import bind_hass
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
# Module-level logger for this integration.
_LOGGER = logging.getLogger(__name__)

# Attribute names used in the service schemas and state attributes below.
ATTR_ACTIVITY = "activity"
ATTR_ACTIVITY_LIST = "activity_list"
ATTR_CURRENT_ACTIVITY = "current_activity"
ATTR_COMMAND_TYPE = "command_type"
ATTR_DEVICE = "device"
ATTR_NUM_REPEATS = "num_repeats"
ATTR_DELAY_SECS = "delay_secs"
ATTR_HOLD_SECS = "hold_secs"
ATTR_ALTERNATIVE = "alternative"
ATTR_TIMEOUT = "timeout"

DOMAIN = "remote"
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)

# Names of the services registered in async_setup.
SERVICE_SEND_COMMAND = "send_command"
SERVICE_LEARN_COMMAND = "learn_command"
SERVICE_DELETE_COMMAND = "delete_command"
SERVICE_SYNC = "sync"

# Defaults for send_command repetition and timing.
DEFAULT_NUM_REPEATS = 1
DEFAULT_DELAY_SECS = 0.4
DEFAULT_HOLD_SECS = 0

# Bitmask feature flags reported via RemoteEntity.supported_features.
SUPPORT_LEARN_COMMAND = 1
SUPPORT_DELETE_COMMAND = 2
SUPPORT_ACTIVITY = 4

# Schema shared by turn_on/turn_off/toggle: an optional activity name.
REMOTE_SERVICE_ACTIVITY_SCHEMA = make_entity_service_schema(
    {vol.Optional(ATTR_ACTIVITY): cv.string}
)
@bind_hass
def is_on(hass: HomeAssistant, entity_id: str) -> bool:
    """Return if the remote is on based on the statemachine.

    Args:
        hass: Home Assistant instance whose state machine is queried.
        entity_id: Entity id of the remote to look up.
    """
    return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Track states and offer events for remotes."""
    component = hass.data[DOMAIN] = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL
    )
    await component.async_setup(config)

    # The three power services share the same optional-activity schema.
    for service_name, handler_name in (
        (SERVICE_TURN_OFF, "async_turn_off"),
        (SERVICE_TURN_ON, "async_turn_on"),
        (SERVICE_TOGGLE, "async_toggle"),
    ):
        component.async_register_entity_service(
            service_name, REMOTE_SERVICE_ACTIVITY_SCHEMA, handler_name
        )

    # send_command: one or more commands, with optional repeat/timing control.
    send_command_schema = {
        vol.Required(ATTR_COMMAND): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(ATTR_DEVICE): cv.string,
        vol.Optional(
            ATTR_NUM_REPEATS, default=DEFAULT_NUM_REPEATS
        ): cv.positive_int,
        vol.Optional(ATTR_DELAY_SECS): vol.Coerce(float),
        vol.Optional(ATTR_HOLD_SECS, default=DEFAULT_HOLD_SECS): vol.Coerce(float),
    }
    component.async_register_entity_service(
        SERVICE_SEND_COMMAND, send_command_schema, "async_send_command"
    )

    # learn_command: everything optional; semantics are device-specific.
    learn_command_schema = {
        vol.Optional(ATTR_DEVICE): cv.string,
        vol.Optional(ATTR_COMMAND): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(ATTR_COMMAND_TYPE): cv.string,
        vol.Optional(ATTR_ALTERNATIVE): cv.boolean,
        vol.Optional(ATTR_TIMEOUT): cv.positive_int,
    }
    component.async_register_entity_service(
        SERVICE_LEARN_COMMAND, learn_command_schema, "async_learn_command"
    )

    # delete_command: commands are required, device is optional.
    delete_command_schema = {
        vol.Required(ATTR_COMMAND): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(ATTR_DEVICE): cv.string,
    }
    component.async_register_entity_service(
        SERVICE_DELETE_COMMAND, delete_command_schema, "async_delete_command"
    )

    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up a config entry."""
    component = cast(EntityComponent, hass.data[DOMAIN])
    return await component.async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    component = cast(EntityComponent, hass.data[DOMAIN])
    return await component.async_unload_entry(entry)
class RemoteEntity(ToggleEntity):
    """Base class for remote entities."""

    # Subclasses populate these _attr_* slots; the properties below expose them.
    _attr_activity_list: list[str] | None = None
    _attr_current_activity: str | None = None
    _attr_supported_features: int = 0

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return self._attr_supported_features

    @property
    def current_activity(self) -> str | None:
        """Active activity."""
        return self._attr_current_activity

    @property
    def activity_list(self) -> list[str] | None:
        """List of available activities."""
        return self._attr_activity_list

    @final
    @property
    def state_attributes(self) -> dict[str, Any] | None:
        """Return optional state attributes."""
        # Activity attributes are only exposed when the feature is supported.
        if self.supported_features & SUPPORT_ACTIVITY:
            return {
                ATTR_ACTIVITY_LIST: self.activity_list,
                ATTR_CURRENT_ACTIVITY: self.current_activity,
            }
        return None

    def send_command(self, command: Iterable[str], **kwargs: Any) -> None:
        """Send commands to a device."""
        raise NotImplementedError()

    async def async_send_command(self, command: Iterable[str], **kwargs: Any) -> None:
        """Send commands to a device."""
        # Run the (possibly blocking) sync implementation in the executor.
        job = ft.partial(self.send_command, command, **kwargs)
        await self.hass.async_add_executor_job(job)

    def learn_command(self, **kwargs: Any) -> None:
        """Learn a command from a device."""
        raise NotImplementedError()

    async def async_learn_command(self, **kwargs: Any) -> None:
        """Learn a command from a device."""
        job = ft.partial(self.learn_command, **kwargs)
        await self.hass.async_add_executor_job(job)

    def delete_command(self, **kwargs: Any) -> None:
        """Delete commands from the database."""
        raise NotImplementedError()

    async def async_delete_command(self, **kwargs: Any) -> None:
        """Delete commands from the database."""
        job = ft.partial(self.delete_command, **kwargs)
        await self.hass.async_add_executor_job(job)
class RemoteDevice(RemoteEntity):
    """Representation of a remote (for backwards compatibility)."""

    def __init_subclass__(cls, **kwargs):
        """Print deprecation warning."""
        super().__init_subclass__(**kwargs)
        # Warn once per subclass definition so integrations migrate to
        # RemoteEntity; behavior is otherwise identical to RemoteEntity.
        _LOGGER.warning(
            "RemoteDevice is deprecated, modify %s to extend RemoteEntity",
            cls.__name__,
        )
| 31.247706 | 88 | 0.698767 |
3e0a362994238c9ad2616739a31f147e326989ea | 681 | py | Python | bims/serializers/taxon_serializer.py | ann26/django-bims | 410e57d99137aea4146b4a40640f9f5ce03d06c5 | [
"MIT"
] | null | null | null | bims/serializers/taxon_serializer.py | ann26/django-bims | 410e57d99137aea4146b4a40640f9f5ce03d06c5 | [
"MIT"
] | null | null | null | bims/serializers/taxon_serializer.py | ann26/django-bims | 410e57d99137aea4146b4a40640f9f5ce03d06c5 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from bims.models import Taxon
class TaxonSerializer(serializers.ModelSerializer):
    """
    Serializer for taxon collection model.

    Exposes every Taxon field plus two derived read-only fields computed
    from the related IUCN status (None when no status is set).
    """
    iucn_status_sensitive = serializers.SerializerMethodField()
    iucn_status_name = serializers.SerializerMethodField()

    def get_iucn_status_sensitive(self, obj):
        status = obj.iucn_status
        return status.sensitive if status else None

    def get_iucn_status_name(self, obj):
        status = obj.iucn_status
        return status.category if status else None

    class Meta:
        model = Taxon
        fields = '__all__'
3c20526d641d5055ca854397dbc81579d66fa599 | 1,529 | py | Python | ax/plot/tests/test_fitted_scatter.py | Balandat/Ax | 6c7556165291a5329744b5075d5f95d2dec18938 | [
"MIT"
] | null | null | null | ax/plot/tests/test_fitted_scatter.py | Balandat/Ax | 6c7556165291a5329744b5075d5f95d2dec18938 | [
"MIT"
] | null | null | null | ax/plot/tests/test_fitted_scatter.py | Balandat/Ax | 6c7556165291a5329744b5075d5f95d2dec18938 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import plotly.graph_objects as go
from ax.modelbridge.registry import Models
from ax.plot.base import AxPlotConfig
from ax.plot.scatter import (
interact_fitted_plotly,
interact_fitted,
)
from ax.utils.common.testutils import TestCase
from ax.utils.testing.core_stubs import get_branin_experiment
class FittedScatterTest(TestCase):
    """Smoke tests for the interactive fitted-value scatter plots."""

    def test_fitted_scatter(self):
        """Build both plot flavors from a fitted model and sanity-check them."""
        exp = get_branin_experiment(with_str_choice_param=True, with_batch=True)
        exp.trials[0].run()
        model = Models.BOTORCH(
            # Model bridge kwargs
            experiment=exp,
            data=exp.fetch_data(),
        )
        # Assert that each type of plot can be constructed successfully
        plot = interact_fitted_plotly(model=model, rel=False)
        self.assertIsInstance(plot, go.Figure)
        plot = interact_fitted(model=model, rel=False)
        self.assertIsInstance(plot, AxPlotConfig)
        # Make sure all parameters and metrics are displayed in tooltips
        tooltips = list(exp.parameters.keys()) + list(exp.metrics.keys())
        for d in plot.data["data"]:
            # Only check scatter plots hoverovers
            if d["type"] != "scatter":
                continue
            for text in d["text"]:
                for tt in tooltips:
                    self.assertTrue(tt in text)
9db3181077c32597e8dde76640466679afdc21a4 | 2,578 | py | Python | logisticRegression.py | NeiderFajardo/EjerciciosElectiva | ebad38107879f8d31ea26027758c02ff06368577 | [
"Apache-2.0"
] | null | null | null | logisticRegression.py | NeiderFajardo/EjerciciosElectiva | ebad38107879f8d31ea26027758c02ff06368577 | [
"Apache-2.0"
] | null | null | null | logisticRegression.py | NeiderFajardo/EjerciciosElectiva | ebad38107879f8d31ea26027758c02ff06368577 | [
"Apache-2.0"
] | null | null | null | #Neider Alejandro Fajardo-20142020025
#Ejercicio Machine Learning que incluye pipeline y corssValidation
#utilizando un método de regresión lógistica
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier,LogisticRegression
from pyspark.ml.feature import StringIndexer, VectorIndexer,HashingTF
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.context import SparkContext
from pyspark.sql.session import SparkSession
# Build the Spark entry points (local mode, single JVM).
sc = SparkContext('local')
spark = SparkSession(sc)

# NOTE(review): hard-coded absolute path -- this only runs on the author's
# machine; consider making the path a CLI argument.
data = spark.read.format("libsvm").load("/home/neider/Documentos/PararellProgramming/MachineLearning1/sample_binary_classification_data.txt")
# Index labels, adding metadata to the label column.
# Fit on whole dataset to include all labels in index.
labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
# Automatically identify categorical features, and index them.
# We specify maxCategories so features with > 4 distinct values are treated as continuous.
featureIndexer =\
    VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a logistic regression model (original comment said "DecisionTree").
lr = LogisticRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8)
# Chain indexers and the classifier in a Pipeline
pipeline = Pipeline(stages=[labelIndexer, featureIndexer, lr])
# Train model. This also runs the indexers.
model = pipeline.fit(trainingData)
# Make predictions.
predictions = model.transform(testData)
# Select (prediction, true label) and compute test error
evaluator = MulticlassClassificationEvaluator(
    labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
# Stage index 2 of the pipeline is the fitted classifier.
treeModel = model.stages[2]
# summary only
print(treeModel)
# NOTE(review): hashingTF is never added to the pipeline, yet its
# numFeatures is tuned in the grid below; inputCol == outputCol is also
# suspicious. Presumably leftover from a text-classification example --
# verify before relying on the grid results.
hashingTF = HashingTF(inputCol="features", outputCol="features")
paramGrid = ParamGridBuilder() \
    .addGrid(hashingTF.numFeatures, [10, 100, 1000]) \
    .addGrid(lr.regParam, [0.1, 0.01]) \
    .build()
crossval = CrossValidator(estimator=pipeline,
                          estimatorParamMaps=paramGrid,
                          evaluator=evaluator,
                          numFolds=2)
cvModel = crossval.fit(trainingData)
prediction = cvModel.transform(testData)
prediction.select("indexedLabel","prediction").show()
# NOTE(review): accuracy is evaluated on `predictions` from the un-tuned
# pipeline model, not on the cross-validated `prediction` -- confirm intent.
accuracy = evaluator.evaluate(predictions)
print("Test Error = %g " % (1.0 - accuracy))
| 38.477612 | 141 | 0.769977 |
6a173c88e22205ebe0ca8f6665a680a0ad8aeb21 | 6,008 | py | Python | azure-mgmt-redis/azure/mgmt/redis/redis_management_client.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | 1 | 2017-10-29T15:14:35.000Z | 2017-10-29T15:14:35.000Z | azure-mgmt-redis/azure/mgmt/redis/redis_management_client.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | null | null | null | azure-mgmt-redis/azure/mgmt/redis/redis_management_client.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.redis_operations import RedisOperations
from .operations.patch_schedules_operations import PatchSchedulesOperations
from . import models
class RedisManagementClientConfiguration(AzureConfiguration):
    """Configuration for RedisManagementClient
    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: Gets subscription credentials which uniquely
     identify the Microsoft Azure subscription. The subscription ID forms part
     of the URI for every service call.
    :type subscription_id: str
    :param api_version: Client Api Version.
    :type api_version: str
    :param accept_language: Gets or sets the preferred language for the
     response.
    :type accept_language: str
    :param long_running_operation_retry_timeout: Gets or sets the retry
     timeout in seconds for Long Running Operations. Default value is 30.
    :type long_running_operation_retry_timeout: int
    :param generate_client_request_id: When set to true a unique
     x-ms-client-request-id value is generated and included in each request.
     Default is true.
    :type generate_client_request_id: bool
    :param str base_url: Service URL
    :param str filepath: Existing config
    """

    def __init__(
            self, credentials, subscription_id, api_version='2016-04-01', accept_language='en-US', long_running_operation_retry_timeout=30, generate_client_request_id=True, base_url=None, filepath=None):
        # Validate required/typed arguments up front so callers get a clear
        # error instead of a failure deep inside msrest.
        if credentials is None:
            raise ValueError("Parameter 'credentials' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        if not isinstance(subscription_id, str):
            raise TypeError("Parameter 'subscription_id' must be str.")
        if api_version is not None and not isinstance(api_version, str):
            raise TypeError("Optional parameter 'api_version' must be str.")
        if accept_language is not None and not isinstance(accept_language, str):
            raise TypeError("Optional parameter 'accept_language' must be str.")
        # Default to the public Azure Resource Manager endpoint.
        if not base_url:
            base_url = 'https://management.azure.com'
        super(RedisManagementClientConfiguration, self).__init__(base_url, filepath)
        # User-agent strings identify this SDK in telemetry.
        self.add_user_agent('redismanagementclient/{}'.format(VERSION))
        self.add_user_agent('Azure-SDK-For-Python')
        self.credentials = credentials
        self.subscription_id = subscription_id
        self.api_version = api_version
        self.accept_language = accept_language
        self.long_running_operation_retry_timeout = long_running_operation_retry_timeout
        self.generate_client_request_id = generate_client_request_id
class RedisManagementClient(object):
    """REST API for Azure Redis Cache Service.

    :ivar config: Configuration for client.
    :vartype config: RedisManagementClientConfiguration

    :ivar redis: Redis operations
    :vartype redis: .operations.RedisOperations
    :ivar patch_schedules: PatchSchedules operations
    :vartype patch_schedules: .operations.PatchSchedulesOperations

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: Gets subscription credentials which uniquely
     identify the Microsoft Azure subscription. The subscription ID forms part
     of the URI for every service call.
    :type subscription_id: str
    :param api_version: Client Api Version.
    :type api_version: str
    :param accept_language: Gets or sets the preferred language for the
     response.
    :type accept_language: str
    :param long_running_operation_retry_timeout: Gets or sets the retry
     timeout in seconds for Long Running Operations. Default value is 30.
    :type long_running_operation_retry_timeout: int
    :param generate_client_request_id: When set to true a unique
     x-ms-client-request-id value is generated and included in each request.
     Default is true.
    :type generate_client_request_id: bool
    :param str base_url: Service URL
    :param str filepath: Existing config
    """

    def __init__(
            self, credentials, subscription_id, api_version='2016-04-01', accept_language='en-US', long_running_operation_retry_timeout=30, generate_client_request_id=True, base_url=None, filepath=None):
        # Argument validation happens inside the configuration object.
        self.config = RedisManagementClientConfiguration(credentials, subscription_id, api_version, accept_language, long_running_operation_retry_timeout, generate_client_request_id, base_url, filepath)
        self._client = ServiceClient(self.config.credentials, self.config)

        # Register every msrest model class for (de)serialization.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        # Operation groups, each bound to the shared client/config/serializers.
        self.redis = RedisOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.patch_schedules = PatchSchedulesOperations(
            self._client, self.config, self._serialize, self._deserialize)
1a26bc6f3178250a89842a1d42ad6219873b3598 | 2,201 | py | Python | src/django_agent_trust/__init__.py | ThePumpingLemma/django-agent-trust | 43151a5c923abd9865723804bc84fb4665f399bb | [
"BSD-2-Clause"
] | null | null | null | src/django_agent_trust/__init__.py | ThePumpingLemma/django-agent-trust | 43151a5c923abd9865723804bc84fb4665f399bb | [
"BSD-2-Clause"
] | null | null | null | src/django_agent_trust/__init__.py | ThePumpingLemma/django-agent-trust | 43151a5c923abd9865723804bc84fb4665f399bb | [
"BSD-2-Clause"
] | null | null | null | from random import randrange
def trust_agent(request, trust_days=None):
    """
    Mark the requesting agent as trusted for the currently logged-in user.
    Anonymous users are ignored.

    :param request: The current request.
    :type request: :class:`~django.http.HttpRequest`

    :param float trust_days: The number of days to trust this agent. ``None``
        for no agent-specific limit.
    """
    from .models import Agent

    if not request.user.is_authenticated:
        return
    request.agent = Agent.trusted_agent(request.user, trust_days)
def trust_session(request):
    """
    Mark the requesting agent as trusted in the context of the current session;
    when the session ends, the agent's trust will be revoked. This replaces any
    agent trust that already exists. All expiration settings and future
    revocations still apply. Anonymous users are ignored.

    :param request: The current request.
    :type request: :class:`~django.http.HttpRequest`
    """
    from .models import SESSION_TOKEN_KEY, Agent

    user = request.user
    if not user.is_authenticated:
        return

    # Internal token linking this agent to the current session. It does not
    # need to be cryptographically sound, just probabilistically unique.
    session_token = randrange(2 ** 32)
    request.session[SESSION_TOKEN_KEY] = session_token
    request.agent = Agent.session_agent(user, session_token)
def revoke_agent(request):
    """
    Revoke trust in the requesting agent for the currently logged-in user.

    :param request: The current request.
    :type request: :class:`~django.http.HttpRequest`
    """
    from .models import Agent

    # Works for anonymous users too: the result is simply an untrusted agent.
    request.agent = Agent.untrusted_agent(request.user)
def revoke_other_agents(request):
    """
    Revoke trust in all of the logged-in user's agents other than the current
    one. Anonymous users are ignored.

    :param request: The current request.
    :type request: :class:`~django.http.HttpRequest`
    """
    user = request.user
    if not user.is_authenticated:
        return

    # Bumping the serial invalidates every agent issued before this moment;
    # the current agent is re-stamped with the new serial so it survives.
    settings = user.agentsettings
    settings.serial += 1
    settings.save()
    request.agent._serial = settings.serial
6dbd6fca5794defbb28c36da6098221e422c4c35 | 3,098 | py | Python | WebHub/WebHub/spiders/pornHubSpider.py | peteyan/WebHubBot | 0d51fe6d345f1a439872e1f7091a0080634ad490 | [
"MIT"
] | null | null | null | WebHub/WebHub/spiders/pornHubSpider.py | peteyan/WebHubBot | 0d51fe6d345f1a439872e1f7091a0080634ad490 | [
"MIT"
] | null | null | null | WebHub/WebHub/spiders/pornHubSpider.py | peteyan/WebHubBot | 0d51fe6d345f1a439872e1f7091a0080634ad490 | [
"MIT"
] | null | null | null | # coding:utf-8
import json
import logging
import re
from WebHub.items import PornVideoItem
from WebHub.pornhub_type import PH_TYPES
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy.spiders import CrawlSpider
class Spider(CrawlSpider):
    """Crawl pornhub category listing pages and scrape per-video metadata
    from each video's embed page into PornVideoItem objects."""

    name = 'pornHubSpider'
    host = 'https://www.pornhub.com'
    start_urls = list(set(PH_TYPES))

    # Raise the "requests" logger to WARNING to cut down on log noise.
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.basicConfig(
        level=logging.DEBUG,
        format=
        '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%a, %d %b %Y %H:%M:%S',
        filename='cataline.log',
        filemode='w')

    def start_requests(self):
        """Seed one listing request per category in PH_TYPES."""
        for ph_type in self.start_urls:
            yield Request(url='https://www.pornhub.com/%s' % ph_type,
                          callback=self.parse_ph_key)

    def parse_ph_key(self, response):
        """Parse a listing page: follow every video's embed page, then the
        'Next' pagination link if present."""
        selector = Selector(response)
        logging.debug('request url:------>' + response.url)
        divs = selector.xpath('//div[@class="phimage"]')
        for div in divs:
            viewkey = re.findall('viewkey=(.*?)"', div.extract())
            # Bug fix: some thumbnails carry no viewkey; the original code
            # raised IndexError on viewkey[0] in that case. Skip instead.
            if not viewkey:
                continue
            yield Request(url='https://www.pornhub.com/embed/%s' % viewkey[0],
                          callback=self.parse_ph_info)
        url_next = selector.xpath(
            '//a[@class="orangeButton" and text()="Next "]/@href').extract()
        logging.debug(url_next)
        if url_next:
            logging.debug(' next page:---------->' + self.host + url_next[0])
            yield Request(url=self.host + url_next[0],
                          callback=self.parse_ph_key)

    def parse_ph_info(self, response):
        """Parse an embed page: extract the `flashvars` JSON blob and map
        its fields onto a PornVideoItem."""
        phItem = PornVideoItem()
        selector = Selector(response)
        _ph_info = re.findall('var flashvars =(.*?)[,|;]\n', selector.extract())
        logging.debug('PH信息的JSON:')
        logging.debug(_ph_info)
        if len(_ph_info) > 0:
            _ph_info_json = json.loads(_ph_info[0])
            duration = _ph_info_json.get('video_duration')
            phItem['video_duration'] = duration
            title = _ph_info_json.get('video_title')
            phItem['video_title'] = title
            image_url = _ph_info_json.get('image_url')
            phItem['image_url'] = image_url
            link_url = _ph_info_json.get('link_url')
            phItem['link_url'] = link_url
            quality_480p = _ph_info_json.get('quality_480p')
            phItem['quality_480p'] = quality_480p
            # Bug fix: lazy %-style logging instead of string concatenation,
            # which raised TypeError whenever one of the fields was missing
            # (None) in the flashvars blob.
            logging.info('duration:%s title:%s image_url:%s link_url:%s',
                         duration, title, image_url, link_url)
            phItem['web_url'] = response.url
            yield phItem
28dec11698bc7c7ea4313665cda04f4661b0c20f | 85 | py | Python | Harpe-website/website/contrib/communication/templatetags/harpe_communication_tags.py | Krozark/Harpe-Website | 1038a8550d08273806c9ec244cb8157ef9e9101e | [
"BSD-2-Clause"
] | null | null | null | Harpe-website/website/contrib/communication/templatetags/harpe_communication_tags.py | Krozark/Harpe-Website | 1038a8550d08273806c9ec244cb8157ef9e9101e | [
"BSD-2-Clause"
] | null | null | null | Harpe-website/website/contrib/communication/templatetags/harpe_communication_tags.py | Krozark/Harpe-Website | 1038a8550d08273806c9ec244cb8157ef9e9101e | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django import template
# Template tag library registry: modules under templatetags/ must expose a
# module-level `register` for Django to discover custom tags and filters.
register = template.Library()
| 12.142857 | 29 | 0.670588 |
a2d27ef9d729e90e8875d38c57d28b08b52df523 | 2,385 | py | Python | Algorithms_intro_to/l2ps7.py | dazz22/udacity-into-to-algorithms | d8f092bb4f8237cf514188dfd1e4d6d8e5561489 | [
"Apache-2.0"
] | null | null | null | Algorithms_intro_to/l2ps7.py | dazz22/udacity-into-to-algorithms | d8f092bb4f8237cf514188dfd1e4d6d8e5561489 | [
"Apache-2.0"
] | null | null | null | Algorithms_intro_to/l2ps7.py | dazz22/udacity-into-to-algorithms | d8f092bb4f8237cf514188dfd1e4d6d8e5561489 | [
"Apache-2.0"
] | null | null | null | # Generate a combination lock graph given a list of nodes
#
def make_link(G, node1, node2):
    """Add an undirected edge between node1 and node2 to graph G.

    G maps each node to a dict of its neighbours (neighbour -> 1). The
    graph is mutated in place and also returned for convenience.
    """
    for a, b in ((node1, node2), (node2, node1)):
        neighbours = G.setdefault(a, {})
        neighbours[b] = 1
    return G
def create_combo_lock(nodes):
    """Build a "combination lock" graph: a chain through all of `nodes`
    plus an edge from the first node to every other node.

    Generalized: works for any sequence of hashable nodes. (The original
    implementation compared node *values* to indices, so it only worked
    when nodes == range(len(nodes)); it also printed the graph as a side
    effect, which has been removed.)
    """
    G = {}
    nodes = list(nodes)
    hub = nodes[0] if nodes else None
    for i in range(len(nodes) - 1):
        # chain edge i -> i+1
        make_link(G, nodes[i], nodes[i + 1])
        # star edge from the hub (first node) to every later node
        make_link(G, hub, nodes[i + 1])
    return G
##############
# Code for testing
def is_chain(graph, nodes):
    """Return True if `graph` is a single open chain covering `nodes`.

    Bug fix: the original used Python 2-only `graph.iteritems()` and
    generator `.next()`, which raise AttributeError on Python 3 (the file
    targets Python 3 -- it uses print()). Also returns False instead of
    raising when no degree-one endpoint exists.
    """
    # find a node with degree one to start walking from
    start = next((n for n, e in graph.items() if len(e) == 1), None)
    if start is None:
        # no endpoint: empty graph or every node has degree >= 2 (a cycle)
        return False
    count = 1
    # keep track of what we've seen to make sure there are no cycles
    seen = {start}
    # follow the edges
    prev = None
    current = start
    while True:
        # neighbours of current, minus the edge back to prev
        nexts = [n for n in graph[current] if n != prev]
        if len(nexts) > 1:
            # too many edges to be a chain
            return False
        elif len(nexts) == 0:
            # end of the chain: did we cover exactly the expected nodes?
            return count == len(nodes)
        prev = current
        current = nexts[0]
        if current in seen:
            # revisiting a node means there is a loop
            return False
        seen.add(current)
        count += 1
def is_combo_lock(graph, nodes):
    """Return True if `graph` is a combination-lock graph over `nodes`:
    one hub connected to all other nodes, plus a chain through the rest.

    Bug fixes for Python 3: `iteritems()` -> `items()`, and `degree`
    initialized to -1 instead of None (comparing `int > None` raises
    TypeError on Python 3).
    """
    # first see if we have a star: find the node of maximum degree
    center = None
    degree = -1
    for node, edges in graph.items():
        if len(edges) > degree:
            center = node
            degree = len(edges)
    if degree != len(nodes) - 1:
        return False
    # make a graph out of all the edges not connected to the center;
    # for a combo lock, what remains must be a single chain
    chain = {}
    for node, edges in graph.items():
        if node == center:
            continue
        for e in edges:
            if e == center:
                continue
            make_link(chain, node, e)
    return is_chain(chain, [n for n in nodes if n != center])
def test():
    """Smoke test: combo locks of several sizes must validate as combo locks."""
    for n in [5, 10, 20]:
        combo = create_combo_lock(range(n))
        if not is_combo_lock(combo, range(n)):
            return False
    return True
| 25.37234 | 70 | 0.533753 |
1001ff52c58fe8de413273bdec73a0d0a07e3be1 | 10,031 | py | Python | cvpods/layers/batch_norm.py | hanqiu-hq/cvpods | 597fa669151fdad87c250fa118a9e3a555f4fb5e | [
"Apache-2.0"
] | null | null | null | cvpods/layers/batch_norm.py | hanqiu-hq/cvpods | 597fa669151fdad87c250fa118a9e3a555f4fb5e | [
"Apache-2.0"
] | null | null | null | cvpods/layers/batch_norm.py | hanqiu-hq/cvpods | 597fa669151fdad87c250fa118a9e3a555f4fb5e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2019-2021 Megvii Inc. All rights reserved.
from loguru import logger
import torch
import torch.distributed as dist
from torch import nn
from torch.autograd.function import Function
from torch.nn import functional as F
from cvpods.utils import comm
from .wrappers import BatchNorm1d, BatchNorm2d
class FrozenBatchNorm2d(nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    It contains non-trainable buffers called
    "weight" and "bias", "running_mean", "running_var",
    initialized to perform identity transformation.

    The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
    which are computed from the original four parameters of BN.
    The affine transform `x * weight + bias` will perform the equivalent
    computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
    When loading a backbone model from Caffe2, "running_mean" and "running_var"
    will be left unchanged as identity transformation.

    Other pre-trained backbone models may contain all 4 parameters.

    The forward is implemented by `F.batch_norm(..., training=False)`.
    """

    _version = 3

    def __init__(self, num_features, eps=1e-5):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        # Buffers, not Parameters: serialized with the model, never optimized.
        self.register_buffer("weight", torch.ones(num_features))
        self.register_buffer("bias", torch.zeros(num_features))
        self.register_buffer("running_mean", torch.zeros(num_features))
        # Stored minus eps so that (running_var + eps) == 1 initially, i.e.
        # a freshly constructed module is an identity transform.
        self.register_buffer("running_var", torch.ones(num_features) - eps)

    def forward(self, x):
        """Apply the frozen affine normalization.

        Bug fix: the original body returned unconditionally before the
        `x.requires_grad` test, which made the fused
        `F.batch_norm(..., training=False)` inference path dead code.
        """
        if x.requires_grad:
            # When gradients are needed, F.batch_norm will use extra memory
            # because its backward op computes gradients for weight/bias as well.
            scale = self.weight * (self.running_var + self.eps).rsqrt()
            bias = self.bias - self.running_mean * scale
            scale = scale.reshape(1, -1, 1, 1)
            bias = bias.reshape(1, -1, 1, 1)
            return x * scale + bias
        else:
            # When gradients are not needed, F.batch_norm is a single fused op
            # and provides more optimization opportunities.
            return F.batch_norm(
                x,
                self.running_mean,
                self.running_var,
                self.weight,
                self.bias,
                training=False,
                eps=self.eps,
            )

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        """Upgrade checkpoints produced by older versions of this class."""
        version = local_metadata.get("version", None)

        if version is None:
            # keep the origin key if version is None
            if prefix + "running_mean" not in state_dict:
                state_dict[prefix + "running_mean"] = self.running_mean.clone().detach()
            if prefix + "running_var" not in state_dict:
                state_dict[prefix + "running_var"] = self.running_var.clone().detach()
        else:
            if version < 2:
                # No running_mean/var in early versions
                # This will silent the warnings
                if prefix + "running_mean" not in state_dict:
                    state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
                if prefix + "running_var" not in state_dict:
                    state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)
            if version < 3:
                logger.info(
                    "FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip("."))
                )
                # In version < 3, running_var are used without +eps.
                state_dict[prefix + "running_var"] -= self.eps

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs
        )

    def __repr__(self):
        return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)

    @classmethod
    def convert_frozen_batchnorm(cls, module):
        """
        Convert BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.

        Args:
            module (torch.nn.Module):

        Returns:
            If module is BatchNorm/SyncBatchNorm, returns a new module.
            Otherwise, in-place convert module and return it.

        Similar to convert_sync_batchnorm in
        https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
        """
        bn_module = nn.modules.batchnorm
        bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
        res = module
        if isinstance(module, bn_module):
            res = cls(module.num_features)
            if module.affine:
                res.weight.data = module.weight.data.clone().detach()
                res.bias.data = module.bias.data.clone().detach()
            res.running_mean.data = module.running_mean.data
            res.running_var.data = module.running_var.data
            res.eps = module.eps
        else:
            # Recurse into children, replacing any converted modules in place.
            for name, child in module.named_children():
                new_child = cls.convert_frozen_batchnorm(child)
                if new_child is not child:
                    res.add_module(name, new_child)
        return res
def get_norm(norm, out_channels):
    """
    Args:
        norm (str or callable): name of a normalization layer ("BN",
            "SyncBN", "SyncBN1d", "FrozenBN", "GN", "nnSyncBN"), an empty
            string (meaning no normalization), or a callable mapping a
            channel count to a module.
        out_channels (int): number of channels the norm layer operates on.

    Returns:
        nn.Module or None: the normalization layer
    """
    if isinstance(norm, str):
        if not norm:
            return None
        # Resolve the name to a factory; built lazily so unknown names fail
        # loudly with a KeyError at call time.
        factories = {
            "BN": BatchNorm2d,
            "SyncBN": NaiveSyncBatchNorm,
            "SyncBN1d": NaiveSyncBatchNorm1d,
            "FrozenBN": FrozenBatchNorm2d,
            "GN": lambda channels: nn.GroupNorm(32, channels),
            "nnSyncBN": nn.SyncBatchNorm,  # keep for debugging
        }
        norm = factories[norm]
    return norm(out_channels)
def get_activation(activation):
    """Build an activation module from a config object.

    Args:
        activation: None, or a config exposing attributes ``NAME``
            (one of "ReLU"/"ReLU6") and ``INPLACE`` (bool).
    Returns:
        nn.Module or None: the activation layer (None when activation is None).
    """
    if activation is None:
        return None
    supported = {
        "ReLU": nn.ReLU,
        "ReLU6": nn.ReLU6,
    }
    act_cls = supported[activation.NAME]
    return act_cls(inplace=activation.INPLACE)
class AllReduce(Function):
    """Differentiable sum of a tensor across all distributed workers."""

    @staticmethod
    def forward(ctx, input):
        # Gather every worker's tensor, then reduce locally. all_gather is
        # used instead of all_reduce to avoid relying on in-place semantics.
        world_size = dist.get_world_size()
        gathered = [torch.zeros_like(input) for _ in range(world_size)]
        dist.all_gather(gathered, input, async_op=False)
        return torch.stack(gathered, dim=0).sum(dim=0)

    @staticmethod
    def backward(ctx, grad_output):
        # The gradient of a cross-worker sum is itself summed across workers.
        dist.all_reduce(grad_output, async_op=False)
        return grad_output
class NaiveSyncBatchNorm(BatchNorm2d):
    """
    `torch.nn.SyncBatchNorm` has known unknown bugs.
    It produces significantly worse AP (and sometimes goes NaN)
    when the batch size on each worker is quite different
    (e.g., when scale augmentation is used, or when it is applied to mask head).
    Use this implementation before `nn.SyncBatchNorm` is fixed.
    It is slower than `nn.SyncBatchNorm`.
    """
    def forward(self, input):
        # Single process or inference mode: plain BatchNorm2d behavior suffices.
        if comm.get_world_size() == 1 or not self.training:
            return super().forward(input)
        assert input.shape[0] > 0, "SyncBatchNorm does not support empty inputs"
        C = input.shape[1]
        # Local first and second moments over the (N, H, W) axes.
        mean = torch.mean(input, dim=[0, 2, 3])
        meansqr = torch.mean(input * input, dim=[0, 2, 3])
        # Ship both moments in one all-reduce, then average over workers.
        vec = torch.cat([mean, meansqr], dim=0)
        vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
        # NOTE(review): world size is read from `comm` above but `dist` here --
        # presumably they agree; confirm if comm can wrap a process subgroup.
        mean, meansqr = torch.split(vec, C)
        # Biased variance: Var[x] = E[x^2] - E[x]^2 (no Bessel correction).
        var = meansqr - mean * mean
        # Exponential moving average of running stats, as in standard BN.
        self.running_mean += self.momentum * (mean.detach() - self.running_mean)
        self.running_var += self.momentum * (var.detach() - self.running_var)
        invstd = torch.rsqrt(var + self.eps)
        # Fold normalization and the affine transform into one scale/bias pair.
        scale = self.weight * invstd
        bias = self.bias - mean * scale
        scale = scale.reshape(1, -1, 1, 1)
        bias = bias.reshape(1, -1, 1, 1)
        return input * scale + bias
class NaiveSyncBatchNorm1d(BatchNorm1d):
    """
    `torch.nn.SyncBatchNorm` has known unknown bugs.
    It produces significantly worse AP (and sometimes goes NaN)
    when the batch size on each worker is quite different
    (e.g., when scale augmentation is used, or when it is applied to mask head).
    Use this implementation before `nn.SyncBatchNorm` is fixed.
    It is slower than `nn.SyncBatchNorm`.
    """
    def forward(self, input):
        # Single process or inference mode: plain BatchNorm1d behavior suffices.
        if comm.get_world_size() == 1 or not self.training:
            return super().forward(input)
        assert input.shape[0] > 0, "SyncBatchNorm does not support empty inputs"
        C = input.shape[1]
        # NOTE(review): moments are taken over dim 0 only and scale/bias are
        # reshaped to (1, C), so this supports (N, C) inputs; a (N, C, L)
        # input would fail the broadcast -- confirm that is intended.
        mean = torch.mean(input, dim=[0])
        meansqr = torch.mean(input * input, dim=[0])
        # Ship both moments in one all-reduce, then average over workers.
        vec = torch.cat([mean, meansqr], dim=0)
        vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
        mean, meansqr = torch.split(vec, C)
        # Biased variance: Var[x] = E[x^2] - E[x]^2 (no Bessel correction).
        var = meansqr - mean * mean
        # Exponential moving average of running stats, as in standard BN.
        self.running_mean += self.momentum * (mean.detach() - self.running_mean)
        self.running_var += self.momentum * (var.detach() - self.running_var)
        invstd = torch.rsqrt(var + self.eps)
        # Fold normalization and the affine transform into one scale/bias pair.
        scale = self.weight * invstd
        bias = self.bias - mean * scale
        scale = scale.reshape(1, -1)
        bias = bias.reshape(1, -1)
        return input * scale + bias
| 36.609489 | 99 | 0.619679 |
b12bb3e5d51edb4f8d396e338d1762e1dc41ae43 | 1,726 | py | Python | src/forms/changtheme.py | TuringApp/Turing | 787bc0eb15af632e7a7a95fd6848db62e91822ea | [
"MIT"
] | 42 | 2018-05-02T07:07:27.000Z | 2022-02-01T19:49:49.000Z | src/forms/changtheme.py | TuringApp/Turing | 787bc0eb15af632e7a7a95fd6848db62e91822ea | [
"MIT"
] | 65 | 2018-03-08T11:53:13.000Z | 2018-09-17T09:00:09.000Z | src/forms/changtheme.py | TuringApp/Turing | 787bc0eb15af632e7a7a95fd6848db62e91822ea | [
"MIT"
] | 8 | 2018-03-31T16:01:36.000Z | 2022-03-06T14:49:24.000Z | # -*- coding: utf-8 -*-
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from forms.ui_changtheme import Ui_ChangeThemeWindow
from util import theming
from util.widgets import center_widget
translate = QCoreApplication.translate
class ChangeThemeWindow(QDialog):
    """Modal dialog that lets the user edit the colors of the 'custom' theme."""
    def __init__(self, parent, orig=()):
        super().__init__(parent)
        self.ui = Ui_ChangeThemeWindow()
        self.ui.setupUi(self)
        # Freeze the dialog at its laid-out size.
        self.setFixedWidth(self.width())
        self.adjustSize()
        self.setFixedSize(self.size())
        # Invoked after the theme dict is updated; replaced by the owner.
        self.theme_callback = lambda: ()
        # NOTE(review): 20 defaults here vs. 24 fields read in apply_theme --
        # confirm whether txtColor_21..24 are intentionally left unseeded.
        orig = orig or ("",) * 20
        def gen(txt):
            # Bind the *current* txt widget (avoids the late-binding pitfall).
            return lambda: self.change_color(txt)
        for i, t in enumerate(orig):
            txt = getattr(self.ui, "txtColor_%02d" % (i + 1))
            btn = getattr(self.ui, "btnCodeColor_%02d" % (i + 1))
            txt.setText(t)
            btn.clicked.connect(gen(txt))
        self.ui.buttonBox.button(QDialogButtonBox.Apply).clicked.connect(self.apply_theme)
        center_widget(self, parent)
    def apply_theme(self):
        # Collect all 24 color fields and store them on the 'custom' theme,
        # keeping that theme's existing first tuple element.
        colors = [getattr(self.ui, "txtColor_%02d" % (i + 1)).text() for i in range(24)]
        theming.themes["custom"] = (theming.themes["custom"][0], colors)
        self.theme_callback()
    def done(self, res):
        # Qt calls this when the dialog closes; persist only on accept.
        if res == QDialog.Accepted:
            self.apply_theme()
            self.ok = True
        super(ChangeThemeWindow, self).done(res)
    def change_color(self, wgt):
        # Open a color picker seeded with the field's current value.
        dlg = QColorDialog(self)
        dlg.setCurrentColor(QColor(wgt.text()))
        if dlg.exec_():
            wgt.setText(dlg.currentColor().name())
    def run(self):
        # NOTE(review): self.ok is only ever set in done(); the short-circuit
        # below prevents an AttributeError when the dialog is rejected.
        return self.exec_() == QDialog.Accepted and self.ok
| 29.254237 | 90 | 0.615875 |
4713e2a3146c01564f48eb5aee3867235a45a406 | 1,536 | py | Python | rllib/utils/annotations.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 39 | 2021-02-02T23:09:31.000Z | 2022-03-28T16:39:12.000Z | rllib/utils/annotations.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 84 | 2021-03-06T08:02:56.000Z | 2022-03-05T08:07:19.000Z | rllib/utils/annotations.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 20 | 2021-02-05T05:51:39.000Z | 2022-03-04T21:13:24.000Z | def override(cls):
"""Annotation for documenting method overrides.
Args:
cls (type): The superclass that provides the overridden method. If this
cls does not actually have the method, an error is raised.
"""
def check_override(method):
if method.__name__ not in dir(cls):
raise NameError("{} does not override any method of {}".format(
method, cls))
return method
return check_override
def PublicAPI(obj):
    """Annotation for documenting public APIs.

    Public APIs are classes and methods exposed to end users of RLlib; they
    can be expected to remain stable across RLlib releases. Subclasses of a
    ``@PublicAPI`` base class (e.g. every trainer, via ``Trainer``) count as
    public API too, as do all trainer configurations.
    """
    # Purely declarative: the annotated object is returned untouched.
    return obj
def DeveloperAPI(obj):
    """Annotation for documenting developer APIs.

    Developer APIs are classes and methods explicitly exposed so developers
    can build custom algorithms or advanced training strategies on top of
    RLlib internals. They are generally stable apart from minor changes,
    though less so than public APIs. Subclasses of a ``@DeveloperAPI`` base
    class count as developer API as well.
    """
    # Purely declarative: the annotated object is returned untouched.
    return obj
| 32 | 79 | 0.696615 |
f67ac2747546a1c10a5b9c23dfea98c74371a26e | 146 | py | Python | Aniyom Ebenezer/Phase 2/STRINGS/Day_31_Challenge_Solution/Question 4 Solution.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | Aniyom Ebenezer/Phase 2/STRINGS/Day_31_Challenge_Solution/Question 4 Solution.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | Aniyom Ebenezer/Phase 2/STRINGS/Day_31_Challenge_Solution/Question 4 Solution.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | """
Write a Python program to check whether a string starts with specified characters.
"""
string = "Pyladies.com"
print(string.startswith("Pyl")) | 29.2 | 82 | 0.753425 |
d3da31d468e68e0c6bdf1e2ebe77aedbbcfe68ca | 13,739 | py | Python | webnotes/translate.py | saurabh6790/OFF-RISLIB | eb7866227c5ff085ea714f79576281d82365f4fe | [
"MIT"
] | null | null | null | webnotes/translate.py | saurabh6790/OFF-RISLIB | eb7866227c5ff085ea714f79576281d82365f4fe | [
"MIT"
] | null | null | null | webnotes/translate.py | saurabh6790/OFF-RISLIB | eb7866227c5ff085ea714f79576281d82365f4fe | [
"MIT"
] | null | null | null | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Contributing:
1. Add the .csv file
2. Run import
3. Then run translate
"""
import webnotes
import os
import codecs
import json
import re
from csv import reader
from webnotes.modules import get_doc_path,get_doctype_module
from webnotes.utils import get_base_path, cstr
def translate(lang=None):
	"""Rebuild message files, then (re)translate and import languages.

	Args:
		lang: a language code, or None/"all" to process every language
			that has a CSV under app/translations.
	"""
	languages = [lang]
	if lang=="all" or lang==None:
		languages = get_all_languages()
	print "Extracting / updating translatable strings..."
	build_message_files()
	print "Compiling messages in one file..."
	# NOTE(review): export_messages receives the original `lang` argument,
	# which may still be None/"all" here -- confirm that is intended.
	export_messages(lang, '_lang_tmp.csv')
	for lang in languages:
		if lang != "en":
			# English is the source language and never needs translation.
			filename = 'app/translations/'+lang+'.csv'
			print "For " + lang + ":"
			print "Translating via Google Translate..."
			google_translate(lang, '_lang_tmp.csv', filename)
			print "Updating language files..."
			import_messages(lang, filename)
	print "Deleting temp file..."
	os.remove('_lang_tmp.csv')
def get_all_languages():
	"""Return codes for every app/translations/*.csv file ([] if the folder is missing)."""
	try:
		return [f[:-4] for f in os.listdir("app/translations") if f.endswith(".csv")]
	except OSError, e:
		if e.args[0]==2:
			# errno 2 == ENOENT: no translations folder yet
			return []
		else:
			raise
def get_lang_dict():
	"""Return the mapping stored in app/translations/languages.json ({} if absent)."""
	path = os.path.join(get_base_path(), "app", "translations", "languages.json")
	if not os.path.exists(path):
		return {}
	with open(path, "r") as langfile:
		return json.loads(langfile.read())
def update_translations():
"""
compare language file timestamps with last updated timestamps in `.wnf-lang-status`
if timestamps are missing / changed, build new `.json` files in the `lang folders`
"""
langstatus = {}
languages = get_all_languages()
message_updated = False
status_file_path = "app/.wnf-lang-status"
if not os.path.exists(os.path.join('app', 'translations')):
return
if os.path.exists(status_file_path):
with open(status_file_path, "r") as langstatusfile:
print langstatusfile
langstatus = eval(langstatusfile.read())
for lang in languages:
filename = os.path.join('app', 'translations', lang + '.csv')
if langstatus.get(lang, None)!=os.path.getmtime(filename):
print "Setting up lang files for " + lang + "..."
if not message_updated:
print "Extracting / updating translatable strings..."
build_message_files()
message_updated = True
print "Writing translations..."
import_messages(lang, filename)
langstatus[lang] = os.path.getmtime(filename)
with open(status_file_path, "w") as langstatusfile:
langstatus = langstatusfile.write(str(langstatus))
def build_message_files():
	"""build from doctypes, pages, database and framework"""
	# The doctype/report builders need a live database connection.
	if not webnotes.conn:
		webnotes.connect()
	build_for_pages('lib/core')
	build_for_pages('app')
	build_from_doctype_code('lib/core')
	build_from_doctype_code('app')
	#reports
	build_from_query_report()
	# doctype
	build_from_database()
	build_for_framework('lib/webnotes', 'py', with_doctype_names=True)
	build_for_framework('lib/public/js/wn', 'js')
	build_for_framework('app/public/js', 'js', with_doctype_names=True)
def build_for_pages(path):
	"""make locale files for framework py and js (all)"""
	# NOTE(review): `messages` below is never used -- looks like dead code.
	messages = []
	for (basepath, folders, files) in os.walk(path):
		# skip generated locale folders while walking
		if 'locale' in folders: folders.remove('locale')
		# only folders that live directly under a "page" folder are pages
		if os.path.basename(os.path.dirname(basepath))=="page":
			messages_js, messages_py = [], []
			for fname in files:
				fname = cstr(fname)
				if fname.endswith('.js'):
					messages_js += get_message_list(os.path.join(basepath, fname))
				if fname.endswith('.py'):
					messages_py += get_message_list(os.path.join(basepath, fname))
			if messages_js:
				write_messages_file(basepath, messages_js, "js")
			if messages_py:
				write_messages_file(basepath, messages_py, "py")
def build_from_query_report():
	"""make locale for the query reports from database and the framework js and py files"""
	import re
	for item in webnotes.conn.sql("""select name, report_name,ref_doctype, query
		from `tabReport`""", as_dict=1):
		messages_js, messages_py = [], []
		if item:
			# the report name itself is user-visible, so it is translatable
			messages_js.append(item.report_name)
			messages_py.append(item.report_name)
			# get the messages from the query using the regex :
			# if we have the string "Production Date:Date:180" in the query then the regex will search for string between " and : .
			# the regex will take "Production Date" and store them into messages
			if item.query :
				messages_query = re.findall('"([^:,^"]*):', item.query)
				messages_js += messages_query
				messages_py += messages_query
			module = get_doctype_module(item.ref_doctype)
			if module :
				doctype_path = get_doc_path(module, "Report", item.name)
				if os.path.exists(doctype_path):
					# only the report's top-level folder is scanned (break below)
					for (basepath, folders, files) in os.walk(doctype_path):
						if 'locale' in folders: folders.remove('locale')
						for fname in files:
							if fname.endswith('.js'):
								messages_js += get_message_list(os.path.join(basepath, fname))
							if fname.endswith('.py'):
								messages_py += get_message_list(os.path.join(basepath, fname))
						break
					write_messages_file(doctype_path, messages_js, 'js')
					write_messages_file(doctype_path, messages_py, 'py')
def build_from_database():
	"""make doctype labels, names, options, descriptions"""
	def get_select_options(doc):
		# only hand-authored Select options are translatable; "link:" and
		# "attach_files:" option strings are resolved at runtime
		if doc.doctype=="DocField" and doc.fieldtype=='Select' and doc.options \
			and not doc.options.startswith("link:") \
			and not doc.options.startswith("attach_files:"):
			return doc.options.split('\n')
		else:
			return []
	build_for_doc_from_database(webnotes._dict({
		"doctype": "DocType",
		"module_field": "module",
		"DocType": ["name", "description", "module"],
		"DocField": ["label", "description"],
		"custom": get_select_options
	}))
def build_for_doc_from_database(fields):
	"""Write per-document locale files for each record of `fields.doctype`.

	`fields` maps child doctypes to their translatable attribute names;
	`fields.custom` is an optional callable returning extra strings.
	"""
	for item in webnotes.conn.sql("""select name from `tab%s`""" % fields.doctype, as_dict=1):
		messages = []
		doclist = webnotes.bean(fields.doctype, item.name).doclist
		for doc in doclist:
			if doc.doctype in fields:
				# non-translatable fields map to None and are filtered out
				# later by write_messages_file
				messages += map(lambda x: x in fields[doc.doctype] and doc.fields.get(x) or None,
					doc.fields.keys())
			if fields.custom:
				messages += fields.custom(doc)
		doc = doclist[0]
		if doc.fields.get(fields.module_field):
			doctype_path = get_doc_path(doc.fields[fields.module_field],
				doc.doctype, doc.name)
			write_messages_file(doctype_path, messages, 'doc')
def build_for_framework(path, mtype, with_doctype_names = False):
	"""make locale files for framework py and js (all)"""
	messages = []
	for (basepath, folders, files) in os.walk(path):
		if 'locale' in folders: folders.remove('locale')
		for fname in files:
			fname = cstr(fname)
			if fname.endswith('.' + mtype):
				messages += get_message_list(os.path.join(basepath, fname))
	# append module & doctype names
	if with_doctype_names:
		for m in webnotes.conn.sql("""select name, module from `tabDocType`"""):
			messages.append(m[0])
			messages.append(m[1])
	# append labels from config.json
	config = webnotes.get_config()
	for moduleinfo in config["modules"].values():
		if moduleinfo.get("label"):
			messages.append(moduleinfo["label"])
	# unlike the per-folder builders, everything is written to `path`'s
	# own locale folder
	if messages:
		write_messages_file(path, messages, mtype)
def build_from_doctype_code(path):
	"""Walk `path` and write locale message files for each folder's code.

	Collects translatable strings from .py and .js sources in every folder
	and writes them to that folder's locale/_messages_{py,js}.json.
	"""
	for (basepath, folders, files) in os.walk(path):
		messagespy = []
		messagesjs = []
		for fname in files:
			fname = cstr(fname)
			# use '.py'/'.js' (with the dot) like the other builders, so
			# files merely *ending* in the letters py/js are not picked up
			if fname.endswith('.py'):
				messagespy += get_message_list(os.path.join(basepath, fname))
			if fname.endswith('.js'):
				messagesjs += get_message_list(os.path.join(basepath, fname))
		if messagespy:
			write_messages_file(basepath, messagespy, 'py')
		# bug fix: this previously re-tested `messagespy`, so js messages
		# were only written when py messages existed in the same folder
		if messagesjs:
			write_messages_file(basepath, messagesjs, 'js')
def get_message_list(path):
	"""Extract strings wrapped in the _("..."), _('...') or triple-quoted _() forms."""
	import re
	with open(path, 'r') as sourcefile:
		txt = sourcefile.read()
	# (pattern, flags) pairs, checked in the original order
	patterns = [
		('_\("([^"]*)".*\)', 0),
		("_\('([^']*)'.*\)", 0),
		('_\("{3}([^"]*)"{3}.*\)', re.S),
	]
	messages = []
	for pattern, flags in patterns:
		messages.extend(re.findall(pattern, txt, flags))
	return messages
def write_messages_file(path, messages, mtype):
	"""Dump unique, letter-bearing messages to <path>/locale/_messages_<mtype>.json.

	Silently does nothing when `path` itself does not exist.
	"""
	if not os.path.exists(path):
		return
	locale_dir = os.path.join(path, 'locale')
	if not os.path.exists(locale_dir):
		os.makedirs(locale_dir)
	fname = os.path.join(locale_dir, '_messages_' + mtype + '.json')
	# de-duplicate and drop entries with no alphabetic content
	filtered = [m for m in set(messages) if m and re.search('[a-zA-Z]+', m)]
	with open(fname, 'w') as msgfile:
		msgfile.write(json.dumps(filtered, indent=1))
def export_messages(lang, outfile):
	"""Collect every message from all locale folders and write a sorted
	two-column (source, translation) CSV to `outfile`."""
	messages = {}
	# extract messages
	for (basepath, folders, files) in os.walk('.'):
		def _get_messages(messages, basepath, mtype):
			mlist = get_messages(basepath, mtype)
			if not mlist:
				return
			# update messages with already existing translations
			langdata = get_lang_data(basepath, lang, mtype)
			for m in mlist:
				# first occurrence wins; later folders cannot overwrite
				if not messages.get(m):
					messages[m] = langdata.get(m, "")
		if os.path.basename(basepath)=='locale':
			_get_messages(messages, basepath, 'doc')
			_get_messages(messages, basepath, 'py')
			_get_messages(messages, basepath, 'js')
	# remove duplicates
	if outfile:
		from csv import writer
		with open(outfile, 'w') as msgfile:
			w = writer(msgfile)
			keys = messages.keys()
			keys.sort()
			for m in keys:
				# py2 csv is byte-oriented, so encode cells as utf-8
				w.writerow([m.encode('utf-8'), messages.get(m, '').encode('utf-8')])
def import_messages(lang, infile):
	"""make individual message files for each language"""
	# source message -> translation, from the (possibly hand-edited) CSV
	data = dict(get_all_messages_from_file(infile))
	for (basepath, folders, files) in os.walk('.'):
		def _update_lang_file(mtype):
			"""create a language file for the given message type"""
			messages = get_messages(basepath, mtype)
			if not messages: return
			# read existing
			langdata = get_lang_data(basepath, lang, mtype)
			# update fresh
			for m in messages:
				if data.get(m):
					langdata[m] = data.get(m)
			if langdata:
				# write new langfile
				langfilename = os.path.join(basepath, lang + '-' + mtype + '.json')
				with open(langfilename, 'w') as langfile:
					langfile.write(json.dumps(langdata, indent=1, sort_keys=True).encode('utf-8'))
				#print 'wrote ' + langfilename
		if os.path.basename(basepath)=='locale':
			# make / update lang files for each type of message file (doc, js, py)
			# example: hi-doc.json, hi-js.json, hi-py.json
			_update_lang_file('doc')
			_update_lang_file('js')
			_update_lang_file('py')
def load_doc_messages(module, doctype, name):
	"""Merge one document's translations into the current request's
	translation dict (no-op for English; each doc loaded at most once).

	Bug fix: the old ``if not docs_loaded:`` guard referenced an undefined
	name (raising NameError on every cache miss) and assigned a never-read
	``translate_docs_loaded`` attribute; the initialization it attempted is
	already handled by the ``translated_docs`` check above, so it is removed.
	"""
	if webnotes.lang=="en":
		return {}
	if not webnotes.local.translated_docs:
		webnotes.local.translated_docs = []
	doc_path = get_doc_path(module, doctype, name)
	# don't reload the same doc again within this request
	if (webnotes.lang + ":" + doc_path) in webnotes.local.translated_docs:
		return
	webnotes.local.translated_docs.append(webnotes.lang + ":" + doc_path)
	webnotes.local.translations.update(get_lang_data(doc_path, None, 'doc'))
def get_lang_data(basepath, lang, mtype):
	"""Load <lang>-<mtype>.json from basepath's locale folder ({} if missing)."""
	if os.path.basename(basepath) != 'locale':
		# callers may pass either a module folder or its locale subfolder
		basepath = os.path.join(basepath, 'locale')
	if not lang:
		lang = webnotes.local.lang
	filename = os.path.join(basepath, lang + '-' + mtype + '.json')
	if not os.path.exists(filename):
		return {}
	with codecs.open(filename, 'r', 'utf-8') as langfile:
		return json.loads(langfile.read())
def get_messages(basepath, mtype):
	"""Read the _messages_<mtype>.json list from basepath ([] if absent)."""
	filename = os.path.join(basepath, '_messages_' + mtype + '.json')
	if not os.path.exists(filename):
		return []
	with open(filename, 'r') as msgfile:
		return json.loads(msgfile.read())
def update_lang_js(jscode, path):
	"""Append the current language's js translation dict to `jscode`."""
	langdata = get_lang_data(path, webnotes.lang, 'js')
	return jscode + "\n\n$.extend(wn._messages, %s)" % json.dumps(langdata)
def get_all_messages_from_file(path):
	"""Read a two-column translation CSV and return rows as unicode lists.

	The file is read as utf-8, re-encoded per line for the byte-oriented
	py2 csv reader, then each cell is decoded back to unicode.
	"""
	with codecs.open(path, 'r', 'utf-8') as msgfile:
		data = msgfile.read()
	data = reader([r.encode('utf-8') for r in data.splitlines()])
	newdata = []
	for row in data:
		newrow = []
		for val in row:
			newrow.append(unicode(val, 'utf-8'))
		newdata.append(newrow)
	return newdata
def google_translate(lang, infile, outfile):
	"""translate objects using Google API. Add your own API key for translation"""
	data = get_all_messages_from_file(infile)
	import requests
	from webnotes import conf
	old_translations = {}
	# update existing translations
	if os.path.exists(outfile):
		with codecs.open(outfile, "r", "utf-8") as oldfile:
			old_data = oldfile.read()
			old_translations = dict(reader([r.encode('utf-8').strip() for r in old_data.splitlines()]))
	with open(outfile, 'w') as msgfile:
		from csv import writer
		w = writer(msgfile)
		for row in data:
			if row[0] and row[0].strip():
				if old_translations.get(row[0].strip()):
					# reuse the previous translation, skip the API call
					# NOTE(review): this branch writes the row without the
					# utf-8 encoding done below -- presumably safe only for
					# ascii rows; confirm.
					row[1] = old_translations[row[0].strip()]
				else:
					print 'translating: ' + row[0]
					response = requests.get("""https://www.googleapis.com/language/translate/v2""",
						params = {
							"key": conf.google_api_key,
							"source": "en",
							"target": lang,
							"q": row[0]
						})
					# NOTE: rebinding `data` does not affect the ongoing
					# `for row in data` iteration, but it shadows the
					# message list for the rest of the loop body.
					data = response.json()
					if "error" in data:
						print data
						continue
					row[1] = data["data"]["translations"][0]["translatedText"]
					if not row[1]:
						row[1] = row[0] # google unable to translate!
					row[1] = row[1].encode('utf-8')
					row[0] = row[0].encode('utf-8')
				w.writerow(row)
ef2e7ee93a2377a7f631bf24ceacd04a6b5c4178 | 397 | py | Python | shop/urls.py | Slohn/myshop | 4e5034dc3b1561773060319f16628d44910c4a20 | [
"MIT"
] | 20 | 2021-05-07T19:32:56.000Z | 2022-02-06T12:12:56.000Z | shop/urls.py | Slohn/myshop | 4e5034dc3b1561773060319f16628d44910c4a20 | [
"MIT"
] | 9 | 2021-04-08T18:29:18.000Z | 2022-03-11T23:28:23.000Z | shop/urls.py | Slohn/myshop | 4e5034dc3b1561773060319f16628d44910c4a20 | [
"MIT"
] | 10 | 2021-02-22T13:50:24.000Z | 2022-03-15T11:51:27.000Z | from django.urls import path
from shop import views
# Bug fix: two routes below are regular expressions, but Django's path()
# treats its argument literally, so those patterns could never match.
# re_path() restores the pre-2.0 regex matching they were written for.
from django.urls import re_path

app_name = 'shop'
urlpatterns = [
    path('', views.product_list, name='product_list'),
    path('search', views.product_search, name='product_search'),
    re_path(r'^(?P<category_slug>[-\w]+)/$',
            views.product_list, name='product_list_by_category'),
    re_path(r'^(?P<id>\d+)/(?P<slug>[-\w]+)/$',
            views.product_detail, name='product_detail'),
]
69de2624d18e88e1a378fb13496ff154c09087b2 | 3,283 | py | Python | monty/exts/utils/delete.py | onerandomusername/monty-python | fcd8b2827eb9bbb2a05d28f80ac9e215589f03f7 | [
"MIT"
] | 20 | 2021-12-31T10:17:20.000Z | 2022-03-31T04:16:17.000Z | monty/exts/utils/delete.py | onerandomusername/monty-bot | b1c769e44b56bc45f37fc809064571d59c80db27 | [
"MIT"
] | 1 | 2022-03-13T22:34:33.000Z | 2022-03-13T22:34:52.000Z | monty/exts/utils/delete.py | onerandomusername/monty-bot | b1c769e44b56bc45f37fc809064571d59c80db27 | [
"MIT"
] | 3 | 2022-01-02T15:21:46.000Z | 2022-03-05T09:37:54.000Z | import disnake
from disnake.ext import commands
from monty.bot import Monty
from monty.log import get_logger
from monty.utils.messages import DELETE_ID_V2
VIEW_DELETE_ID_V1 = "wait_for_deletion_interaction_trash"
logger = get_logger(__name__)
class DeleteManager(commands.Cog, slash_command_attrs={"dm_permission": False}):
    """Handle delete buttons being pressed."""
    def __init__(self, bot: Monty):
        self.bot = bot
    # button schema
    # prefix:PERMS:USERID
    # optional :MSGID
    @commands.Cog.listener("on_button_click")
    async def handle_v2_button(self, inter: disnake.MessageInteraction) -> None:
        """Delete a message if the user is authorized to delete the message."""
        if not inter.component.custom_id.startswith(DELETE_ID_V2):
            return
        # custom_id layout after the prefix: "<perms>:<user_id>[:<msg_id>]"
        custom_id = inter.component.custom_id.removeprefix(DELETE_ID_V2)
        perms, user_id, *extra = custom_id.split(":")
        delete_msg = None
        if extra:
            if extra[0]:
                delete_msg = int(extra[0])
        perms, user_id = int(perms), int(user_id)
        # check if the user id is the allowed user OR check if the user has any of the permissions allowed
        if not (is_orig_author := inter.author.id == user_id):
            permissions = disnake.Permissions(perms)
            user_permissions = inter.permissions
            # bitwise overlap: any single shared permission bit authorizes deletion
            if not permissions.value & user_permissions.value:
                await inter.response.send_message("Sorry, this delete button is not for you!", ephemeral=True)
                return
        if (
            not hasattr(inter.channel, "guild")
            or not (myperms := inter.channel.permissions_for(inter.me)).read_messages
        ):
            # DMs / unreadable channels: delete via the interaction token instead
            await inter.response.defer()
            await inter.delete_original_message()
            return
        await inter.message.delete()
        # Optionally also delete the message that triggered the bot's reply --
        # only when the clicker is the original author and we can manage messages.
        if not delete_msg or not myperms.manage_messages or not is_orig_author:
            return
        if msg := inter.bot.get_message(delete_msg):
            # skip deletion if the original message was edited since
            if msg.edited_at:
                return
        else:
            msg = inter.channel.get_partial_message(delete_msg)
        try:
            await msg.delete()
        except disnake.NotFound:
            # already gone -- nothing to do
            pass
        except disnake.Forbidden:
            logger.warning("Cache is unreliable, or something weird occured.")
    @commands.Cog.listener("on_button_click")
    async def handle_v1_buttons(self, inter: disnake.MessageInteraction) -> None:
        """Handle old, legacy, buggy v1 deletion buttons that still may exist."""
        if inter.component.custom_id != VIEW_DELETE_ID_V1:
            return
        view = disnake.ui.View.from_message(inter.message)
        # get the button from the view
        for comp in view.children:
            if VIEW_DELETE_ID_V1 == getattr(comp, "custom_id", None):
                break
        else:
            raise RuntimeError("view doesn't contain the button that was clicked.")
        # disable the stale button instead of honoring it
        comp.disabled = True
        await inter.response.edit_message(view=view)
        await inter.followup.send("This button should not have been enabled, and no longer works.", ephemeral=True)
def setup(bot: Monty) -> None:
    """Entry point used by disnake's extension loader: register the cog."""
    cog = DeleteManager(bot)
    bot.add_cog(cog)
| 35.684783 | 115 | 0.645446 |
f2d8d386d5432971b802b310ae7d7428b872c472 | 579 | py | Python | setup.py | tleewu/django-follow | 2c70242c8198259c9dec54a9f92df4e055b5dd12 | [
"MIT"
] | 36 | 2015-02-12T07:42:16.000Z | 2020-07-10T10:11:48.000Z | setup.py | saumishr/django-follow | d23fe6b8aaa1fea12db0c1175847e420f6b9f058 | [
"MIT"
] | 4 | 2015-02-12T02:55:50.000Z | 2020-07-22T19:25:52.000Z | setup.py | saumishr/django-follow | d23fe6b8aaa1fea12db0c1175847e420f6b9f058 | [
"MIT"
] | 20 | 2015-02-22T08:06:19.000Z | 2020-05-13T20:47:49.000Z | #!/usr/bin/env python
from setuptools import setup, find_packages
import follow
# Package metadata for PyPI / pip. The version is single-sourced from the
# `follow` package imported above.
setup(
    name='django-follow',
    description='Application which enables following features for users. Can be used for contact books or whatnot',
    long_description=open('README.rst').read(),
    packages=find_packages(),
    author='Alen Mujezinovic',
    author_email='alen@caffeinehit.com',
    url='https://github.com/caffeinehit/django-follow',
    include_package_data=True,
    # ship the app's HTML templates with the package
    package_data={'follow': ['templates/follow/*html']},
    zip_safe=False,
    version=follow.__version__,
)
| 30.473684 | 115 | 0.727116 |
d005ab9af742f92c4be640377d24f97baca58d30 | 261 | py | Python | statistics_website/manage.py | stupa-hh/website | e3dc4f4850c8caf957011f8e974537d5ec7d45d1 | [
"MIT"
] | null | null | null | statistics_website/manage.py | stupa-hh/website | e3dc4f4850c8caf957011f8e974537d5ec7d45d1 | [
"MIT"
] | 13 | 2016-01-19T09:41:29.000Z | 2017-04-03T12:41:50.000Z | statistics_website/manage.py | stupa-hh/website | e3dc4f4850c8caf957011f8e974537d5ec7d45d1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "statistics_website.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.727273 | 82 | 0.781609 |
9393b20d9f93b28f9d8bb593037182b9911906ec | 2,600 | py | Python | tests/test_DialogueServer.py | Dagu9/Reinforcement-learning-SGD | eb4a2546d6c99917b33e8cc4c210709e7d4cc15e | [
"Apache-2.0"
] | 2 | 2020-01-20T14:43:27.000Z | 2021-04-29T12:21:05.000Z | tests/test_DialogueServer.py | vmishra04/Pydial | a689fa1177cd34f32dd4d30a5a6140fb721855bf | [
"Apache-2.0"
] | null | null | null | tests/test_DialogueServer.py | vmishra04/Pydial | a689fa1177cd34f32dd4d30a5a6140fb721855bf | [
"Apache-2.0"
] | null | null | null | ###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2019
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
************************
**test_DialogueServer.py** - test DialogueServer()
================================================================================
Use *utils/dummyDialogueServerClient* to create multiprocess instances of a fake
client which communicate concurrently with a running dialogue server in a separate process.
'''
import os,sys
# Re-root the process at the repository root (the parent of tests/) so that
# relative config paths and intra-project imports below resolve correctly.
curdir = os.path.dirname(os.path.realpath(__file__))
curdir = curdir.split('/')
curdir = '/'.join(curdir[:-1]) +'/'
os.chdir(curdir)
sys.path.append(curdir)
#from nose.tools import with_setup
from ontology import Ontology
from utils import Settings, dummyDialogueServerClient, ContextLogger
import multiprocessing as mp
import DialogueServer
import time
class TDialogueServer():
    """Integration-test harness: boots a DialogueServer in a child process
    and exercises it with concurrent fake clients."""
    def __init__(self):
        cfg = 'tests/test_configs/dialogueserver.cfg'
        # NOTE(review): assert is stripped under `python -O`; acceptable here
        assert(os.path.exists(cfg))
        Settings.init(config_file=cfg)
        ContextLogger.createLoggingHandlers(config=Settings.config)
    def ds(self):
        # Child-process entry point: rebuild the ontology, then serve forever.
        reload(Ontology.FlatOntologyManager)
        Ontology.init_global_ontology()
        dial_server = DialogueServer.DialogueServer()
        dial_server.run()
    def test_dialogueserver(self):
        '''Create a DialogueServer and a few dummy clients
        '''
        p = mp.Process(target=self.ds)
        p.start()
        # 3 concurrent clients, 1 dialogue each, no pause between turns
        dummyDialogueServerClient.run_fake_clients(NUM_CLIENTS=3,pause_time=0,DIALOGS_PER_CLIENT=1)
        # terminate rather than join: the server loop never exits on its own
        p.terminate()
def Test():
    """Run the DialogueServer integration test directly (outside a test runner)."""
    test = TDialogueServer()
    print "\nExecuting tests in",test.__class__.__name__
    test.test_dialogueserver()
    print "Done"
if __name__ == '__main__':
    # allow running this test module as a standalone script
    Test()
#END OF FILE
| 29.885057 | 99 | 0.630385 |
2178594fe32b69be8da80802487d949469499939 | 974 | py | Python | custom_plate/test_db.py | Dharun/TensorPy | fa1a34bd7127e6b1867b2adcaf88ede063554ef9 | [
"FTL",
"CNRI-Python"
] | 267 | 2018-03-07T06:42:33.000Z | 2022-03-08T20:46:14.000Z | custom_plate/test_db.py | Santoshi055/Tensorflow-License-Plate-Detection | 4d446a8e6ec60e6708a29f626b0a01e9a40b923b | [
"FTL",
"CNRI-Python"
] | 20 | 2018-07-23T11:53:50.000Z | 2021-04-27T14:12:45.000Z | custom_plate/test_db.py | Santoshi055/Tensorflow-License-Plate-Detection | 4d446a8e6ec60e6708a29f626b0a01e9a40b923b | [
"FTL",
"CNRI-Python"
] | 140 | 2018-03-07T06:43:02.000Z | 2022-03-22T03:16:40.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 26 15:27:59 2018
Script created to support tensorflow
It pushes License plate value along with the timestamp
in POSTGRESSQL
@author: tensorflow-cuda
"""
import psycopg2
def psyco_insert_plate(plate):
    """Insert a license-plate string into the `testing` table and return all rows.

    Returns:
        list of tuples -- every row of `testing` after the insert, or None
        when the database operation fails (the error is printed, preserving
        this helper script's best-effort behaviour).
    """
    plate_char = str(plate)
    conn = None
    try:
        connect_str = "dbname='testpython' user='postgres' host='localhost' " + \
                      "password='postgres'"
        conn = psycopg2.connect(connect_str)
        print('ok')
        cursor = conn.cursor()
        # Bug fix: psycopg2 requires a *sequence* of parameters. `(plate_char)`
        # is just a parenthesized string, so each character was treated as a
        # separate parameter; the trailing comma makes it a one-element tuple.
        cursor.execute('INSERT INTO testing (numplate) VALUES (%s)', (plate_char,))
        conn.commit()
        print('ok')
        cursor.execute("""SELECT * from testing""")
        rows = cursor.fetchall()
        print('Plate inserted')
        return rows
    except Exception as e:
        print("Big problem. Invalid dbname, user or password?")
        print(e)
    finally:
        # Bug fix: the connection was never closed, leaking a server
        # session on every call.
        if conn is not None:
            conn.close()
# Ad-hoc smoke test: insert a dummy plate and dump the table contents.
yo = 'sdffv'
print(psyco_insert_plate(yo))
9f2792e56b88b762091d4dda28dc98669bd9e5e9 | 1,950 | py | Python | TIDALDL-GUI-CROSS/tidal_gui/viewModel/taskItemModel.py | Voilibaar/Tidal-Media-Downloader | 8af25e0600670210ce00871eedb19a3e8b55377e | [
"Apache-2.0"
] | 2,003 | 2018-11-09T06:44:25.000Z | 2022-03-31T08:14:35.000Z | TIDALDL-GUI-CROSS/tidal_gui/viewModel/taskItemModel.py | Voilibaar/Tidal-Media-Downloader | 8af25e0600670210ce00871eedb19a3e8b55377e | [
"Apache-2.0"
] | 776 | 2018-11-17T22:48:39.000Z | 2022-03-29T16:40:36.000Z | TIDALDL-GUI-CROSS/tidal_gui/viewModel/taskItemModel.py | Voilibaar/Tidal-Media-Downloader | 8af25e0600670210ce00871eedb19a3e8b55377e | [
"Apache-2.0"
] | 466 | 2018-10-29T17:06:58.000Z | 2022-03-28T05:57:33.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : taskItemModel.py
@Date : 2021/9/14
@Author : Yaronzz
@Version : 1.0
@Contact : yaronhuang@foxmail.com
@Desc :
"""
import _thread
import aigpy.stringHelper
from tidal_dl import Type
from tidal_dl.model import Album, Track, Video, Playlist
from tidal_gui.tidalImp import tidalImp
from tidal_gui.view.taskItemView import TaskItemView
from tidal_gui.viewModel.viewModel import ViewModel
class TaskItemModel(ViewModel):
def __init__(self, data):
super(TaskItemModel, self).__init__()
self.view = TaskItemView()
if isinstance(data, Album):
self.__initAlbum__(data)
elif isinstance(data, Track):
self.__initTrack__(data)
elif isinstance(data, Video):
self.__initVideo__(data)
elif isinstance(data, Playlist):
self.__initPlaylist__(data)
def __initAlbum__(self, data: Album):
title = data.title
desc = f"by {data.artist.name} " \
f"{tidalImp.getDurationString(data.duration)} " \
f"Track-{data.numberOfTracks} " \
f"Video-{data.numberOfVideos}"
self.view.setLabel(title, desc)
def __thread_func__(model: TaskItemModel, album: Album):
cover = tidalImp.getCoverData(album.cover, '1280', '1280')
self.view.setPic(cover)
msg, tracks, videos = tidalImp.getItems(album.id, Type.Album)
if not aigpy.stringHelper.isNull(msg):
model.view.setErrmsg(msg)
return
# TODO
for item in tracks:
pass
for item in videos:
pass
_thread.start_new_thread(__thread_func__, (self, data))
def __initTrack__(self, data: Track):
pass
def __initVideo__(self, data: Video):
pass
def __initPlaylist__(self, data: Playlist):
pass
| 29.104478 | 73 | 0.614872 |
f6a9c77ede2465bcc41eb341efc4510f8c0ff009 | 4,912 | py | Python | geoUtils.py | tgadf/utils | e0176d4f8a5bed4ecc3b63cb4bf2ee1265840900 | [
"MIT"
] | null | null | null | geoUtils.py | tgadf/utils | e0176d4f8a5bed4ecc3b63cb4bf2ee1265840900 | [
"MIT"
] | null | null | null | geoUtils.py | tgadf/utils | e0176d4f8a5bed4ecc3b63cb4bf2ee1265840900 | [
"MIT"
] | null | null | null | from pyspark.sql.functions import udf
from pyspark.sql.types import StringType, DoubleType, FloatType
from numpy import pi, cos
from haversine import haversine
from pandasUtils import isDataFrame
import geohash
def getGeo3(lat, long):
try:
retval = geohash.encode(lat, long, precision=3)
except:
retval = None
return retval
def getGeo4(lat, long):
try:
retval = geohash.encode(lat, long, precision=4)
except:
retval = None
return retval
def getGeo5(lat, long):
try:
retval = geohash.encode(lat, long, precision=5)
except:
retval = None
return retval
def getGeo6(lat, long):
try:
retval = geohash.encode(lat, long, precision=6)
except:
retval = None
return retval
def getGeo7(lat, long):
try:
retval = geohash.encode(lat, long, precision=7)
except:
retval = None
return retval
############################################################################################################
#
# Geo8
#
############################################################################################################
# Encode Geohash BitLen8 with Vector Inputs
def funcGeo8(x):
try:
lat = x[0]
long = x[1]
except:
retval = 'yyyyyyyy'
return retval
try:
retval = geohash.encode(lat, long, precision=8)
except:
retval = 'xxxxxxxx'
return retval
# Encode Geohash BitLen8 with 2 Inputs
def getGeo8(lat, long):
try:
retval = geohash.encode(lat, long, precision=8)
except:
retval = 'zzzzzzzz'
return retval
# Apply Geo8 function to DataFrame columns
def applyGeo8(df, latCol='lat', lngCol='long'):
if not isDataFrame(df):
raise ValueError("Cannot apply geohash function because the df is not a DataFrame")
colvals = df[[latCol, lngCol]].apply(funcGeo8, axis=1).values
return colvals
# Apply Geo8 function to PySpark DataFrame columns
get_geo8_udf = udf(lambda lat,long: getGeo8(lat, long), StringType())
############################################################################################################
#
# Distance Metrics
#
############################################################################################################
def getDist(gcode1, gcode2, units='m'):
if all((isinstance(x, str) for x in [gcode1, gcode2])):
try:
pnt1 = geohash.decode_exactly(gcode1)[:2]
pnt2 = geohash.decode_exactly(gcode2)[:2]
dist = haversine(pnt1, pnt2)
except:
dist = None
elif all((isinstance(x, (list,tuple)) for x in [gcode1, gcode2])):
try:
dist = haversine(gcode1, gcode2)
except:
dist = None
elif isinstance(gcode1, str) and isinstance(gcode2, (list,tuple)):
try:
pnt1 = geohash.decode_exactly(gcode1)[:2]
dist = haversine(pnt1, gcode2)
except:
dist = None
elif isinstance(gcode2, str) and isinstance(gcode1, (list,tuple)):
try:
pnt2 = geohash.decode_exactly(gcode2)[:2]
dist = haversine(pnt2, gcode1)
except:
dist = None
else:
raise ValueError("Did not understand types {0} and {1} for getDist() inputs in geoClustering.py".format(type(gcode1), type(gcode2)))
if units == 'm':
try:
dist = 1000*dist
except:
dist = None
return dist
def convertMetersToLat(dist, debug=False):
# Earth's radius, sphere
R=6378137
# Distances
dn = dist
# Coordinate offsets in radians
dLat = dn/R
# OffsetPosition, decimal degrees
lat = dLat * 180/pi
return lat
def convertLatToMeters(ang, debug=False):
# Earth's radius, sphere
R=6378137
# convert decimal to radians
dLat = ang * pi/180
# scale to Earth's radius
dist = dLat * R
return dist
def convertMetersToLong(dist, lat, debug=False):
# Earth's radius, sphere
R=6378137
# Distances
de = dist
# Coordinate offsets in radians
dLon = de/(R*cos(pi*lat/180))
# OffsetPosition, decimal degrees
lng = dLon * 180/pi
return lng
def convertLongToMeters(ang, lat, debug=False):
# Earth's radius, sphere
R=6378137
# convert decimal to radians
dLon = ang * pi/180
dLat = lat * pi/180
# scale to Earth's radius with known latitude
dist = dLon * (R*cos(dLat))
return dist
prec=""
get_geo3_udf = udf(lambda lat,long: getGeo3(lat, long), StringType())
get_geo4_udf = udf(lambda lat,long: getGeo4(lat, long), StringType())
get_geo5_udf = udf(lambda lat,long: getGeo5(lat, long), StringType())
get_geo6_udf = udf(lambda lat,long: getGeo6(lat, long), StringType())
get_geo7_udf = udf(lambda lat,long: getGeo7(lat, long), StringType())
| 25.450777 | 140 | 0.561889 |
d6070608dd75d637db7df2390df25d28e828da70 | 5,296 | py | Python | integrations/tensorflow/e2e/keras/vision_model_test.py | WindQAQ/iree | 68fc75cbe6e4bdf175885c17d41f4d61a55c3537 | [
"Apache-2.0"
] | null | null | null | integrations/tensorflow/e2e/keras/vision_model_test.py | WindQAQ/iree | 68fc75cbe6e4bdf175885c17d41f4d61a55c3537 | [
"Apache-2.0"
] | null | null | null | integrations/tensorflow/e2e/keras/vision_model_test.py | WindQAQ/iree | 68fc75cbe6e4bdf175885c17d41f4d61a55c3537 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test all applications models in Keras."""
import os
from absl import app
from absl import flags
import numpy as np
from pyiree.tf.support import tf_test_utils
from pyiree.tf.support import tf_utils
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
# Testing all applications models automatically can take time
# so we test it one by one, with argument --model=MobileNet
flags.DEFINE_string('model', 'ResNet50', 'model name')
flags.DEFINE_string(
'url', '', 'url with model weights '
'for example https://storage.googleapis.com/iree_models/')
flags.DEFINE_enum('data', 'cifar10', ['cifar10', 'imagenet'],
'data sets on which model was trained: imagenet, cifar10')
flags.DEFINE_bool(
'include_top', True,
'Whether or not to include the final (top) layers of the model.')
APP_MODELS = {
'ResNet50':
tf.keras.applications.resnet.ResNet50,
'ResNet101':
tf.keras.applications.resnet.ResNet101,
'ResNet152':
tf.keras.applications.resnet.ResNet152,
'ResNet50V2':
tf.keras.applications.resnet_v2.ResNet50V2,
'ResNet101V2':
tf.keras.applications.resnet_v2.ResNet101V2,
'ResNet152V2':
tf.keras.applications.resnet_v2.ResNet152V2,
'VGG16':
tf.keras.applications.vgg16.VGG16,
'VGG19':
tf.keras.applications.vgg19.VGG19,
'Xception':
tf.keras.applications.xception.Xception,
'InceptionV3':
tf.keras.applications.inception_v3.InceptionV3,
'InceptionResNetV2':
tf.keras.applications.inception_resnet_v2.InceptionResNetV2,
'MobileNet':
tf.keras.applications.mobilenet.MobileNet,
'MobileNetV2':
tf.keras.applications.mobilenet_v2.MobileNetV2,
'DenseNet121':
tf.keras.applications.densenet.DenseNet121,
'DenseNet169':
tf.keras.applications.densenet.DenseNet169,
'DenseNet201':
tf.keras.applications.densenet.DenseNet201,
'NASNetMobile':
tf.keras.applications.nasnet.NASNetMobile,
'NASNetLarge':
tf.keras.applications.nasnet.NASNetLarge,
}
def get_input_shape():
if FLAGS.data == 'imagenet':
if FLAGS.model in ['InceptionV3', 'Xception', 'InceptionResNetV2']:
return (1, 299, 299, 3)
elif FLAGS.model == 'NASNetLarge':
return (1, 331, 331, 3)
else:
return (1, 224, 224, 3)
elif FLAGS.data == 'cifar10':
return (1, 32, 32, 3)
else:
raise ValueError(f'Data not supported: {FLAGS.data}')
def load_cifar10_weights(model):
file_name = 'cifar10' + FLAGS.model
# get_file will download the model weights from a publicly available folder,
# save them to cache_dir=~/.keras/models/ and return a path to them.
url = os.path.join(
FLAGS.url, f'cifar10_include_top_{FLAGS.include_top:d}_{FLAGS.model}.h5')
weights_path = tf.keras.utils.get_file(file_name, url)
model.load_weights(weights_path)
return model
def initialize_model():
tf_utils.set_random_seed()
tf.keras.backend.set_learning_phase(False)
# Keras applications models receive input shapes without a batch dimension, as
# the batch size is dynamic by default. This selects just the image size.
input_shape = get_input_shape()[1:]
# If weights == 'imagenet', the model will load the appropriate weights from
# an external tf.keras URL.
weights = 'imagenet' if FLAGS.data == 'imagenet' else None
model = APP_MODELS[FLAGS.model](
weights=weights, include_top=FLAGS.include_top, input_shape=input_shape)
if FLAGS.data == 'cifar10' and FLAGS.url:
model = load_cifar10_weights(model)
return model
class VisionModule(tf.Module):
def __init__(self):
super(VisionModule, self).__init__()
self.m = initialize_model()
# Specify input shape with a static batch size.
# TODO(b/142948097): Add support for dynamic shapes in SPIR-V lowering.
# Replace input_shape with m.input_shape to make the batch size dynamic.
self.predict = tf.function(
input_signature=[tf.TensorSpec(get_input_shape())])(
self.m.call)
@tf_test_utils.compile_module(VisionModule, exported_names=['predict'])
class AppTest(tf_test_utils.TracedModuleTestCase):
def test_application(self):
def predict(module):
module.predict(tf_utils.uniform(get_input_shape()))
self.compare_backends(predict)
def main(argv):
del argv # Unused
if hasattr(tf, 'enable_v2_behavior'):
tf.enable_v2_behavior()
if FLAGS.model not in APP_MODELS:
raise ValueError(f'Unsupported model: {FLAGS.model}')
# Override VisionModule's __name__ to be more specific.
VisionModule.__name__ = os.path.join(FLAGS.model, FLAGS.data)
tf.test.main()
if __name__ == '__main__':
app.run(main)
| 32.292683 | 80 | 0.712424 |
4a3990c0b6f96cc982474bc70cb042db9a0cb718 | 5,185 | py | Python | salvemais/models.py | SalveMais/api | 679c74db2d93df49dd62b235c5b0a07a78145d5e | [
"MIT"
] | null | null | null | salvemais/models.py | SalveMais/api | 679c74db2d93df49dd62b235c5b0a07a78145d5e | [
"MIT"
] | null | null | null | salvemais/models.py | SalveMais/api | 679c74db2d93df49dd62b235c5b0a07a78145d5e | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from flask import current_app
from werkzeug.security import generate_password_hash, check_password_hash
from . import db
from .util.data import cell_chart, protein_chart
from .util.hash import generate_auth_token
class BloodType(db.Document):
cell = db.StringField(required=True, choices=('A', 'B', 'AB', 'O'))
protein = db.StringField(required=True, choices=('+', '-'))
@classmethod
def get(cls, blood_type=None, **kwargs):
if blood_type:
cell, protein = blood_type[:-1], blood_type[-1:]
if cell not in cell_chart or protein not in protein_chart:
return []
kwargs['cell'] = cell
kwargs['protein'] = protein
try:
blood = BloodType.objects.get(**kwargs)
except cls.DoesNotExist:
blood = BloodType(**kwargs).save()
return blood
def can_donate(self, to_blood=None):
pass
def can_receive(self, from_blood=None):
if from_blood:
if isinstance(from_blood, str):
from_blood = self.find(from_blood)
match_cell = cell_chart[self.cell][from_blood.cell]
match_protein = protein_chart[self.protein][from_blood.protein]
return match_cell and match_protein
matches = []
for blood in BloodType.objects:
match_cell = cell_chart[self.cell][blood.cell]
match_protein = protein_chart[self.protein][blood.protein]
if match_cell and match_protein:
matches.append(blood)
return matches
def __unicode__(self):
return '{}{}'.format(self.cell, self.protein)
class Address(db.EmbeddedDocument):
street = db.StringField(max_length=150)
number = db.StringField(max_length=15)
complement = db.StringField(max_length=50)
district = db.StringField(max_length=50)
city = db.StringField(max_length=50)
state = db.StringField(max_length=2)
cep = db.StringField(max_length=15)
lat = db.FloatField()
lng = db.FloatField()
class Facebook(db.EmbeddedDocument):
user_id = db.StringField(max_length=150)
api_token = db.StringField(max_length=150)
class Google(db.EmbeddedDocument):
user_id = db.StringField(max_length=150)
api_token = db.StringField(max_length=150)
class User(db.Document):
email = db.EmailField()
password_hash = db.StringField(max_length=255)
auth_token = db.StringField(max_length=255)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def set_auth_token(self):
self.auth_token = generate_auth_token()
@classmethod
def create(cls, **kwargs):
user = cls()
user.email = kwargs['email']
user.set_password(kwargs['password'])
user.save()
return user
class Profile(db.Document):
user = db.ReferenceField(User)
email = db.EmailField()
name = db.StringField(max_length=150)
nickname = db.StringField(max_length=50)
phone = db.StringField(max_length=50)
facebook = db.EmbeddedDocumentField(Facebook)
google = db.EmbeddedDocumentField(Google)
meta = {
'abstract': True,
'allow_inheritance': True,
}
class Donor(Profile):
blood_type = db.ReferenceField(BloodType)
cpf = db.StringField()
gender = db.StringField(choices=current_app.config['GENDERS'])
birthday = db.DateTimeField()
height = db.FloatField()
weigth = db.FloatField()
def grace_period(self):
latest_donation = self.latest_donation
today = datetime.now()
return today - latest_donation.timestamp
@property
def latest_donation(self):
return self.donations().orderby('timestamp')[0]
@property
def blood_volume(self):
return current_app.config['BLOOD_RATIO'][self.gender] * self.weigth
@property
def years_old(self):
return datetime.now().year - self.birthday.year
@property
def can_donate(self):
age_check = 18 <= self.years_old <= 60
weight_check = self.weigth >= 50 if self.weigth else True
year_donations = self.donations(timestamp__gte=datetime.now() - timedelta(months=12)).count()
if self.gender == 'M':
month_range = 2 # 60 days
yearly_amount = 4 # 4 donations/year
else:
month_range = 3 # 90 days
yearly_amount = 3 # 3 donations/year
month_donations = self.donations(timestamp__gte=datetime.now() - timedelta(months=month_range)).count()
frequency_check = month_donations < 2 and year_donations < yearly_amount
return age_check and weight_check and frequency_check
def donations(self, **kwargs):
return Donation.objects(donor=self, **kwargs)
class Hemocenter(Profile):
address = db.EmbeddedDocumentField(Address)
class Donation(db.Document):
donor = db.ReferenceField(Donor)
hemocenter = db.ReferenceField(Hemocenter)
timestamp = db.DateTimeField(default=datetime.now)
| 30.321637 | 111 | 0.662681 |
2305bfe5d1d2b5c4b9c7aa0765cea78f951db31c | 4,889 | py | Python | 2018/11/2018_Day_11a.py | vScourge/Advent_of_Code | 91c743e20e1105d25bcc30da882677ce11c8092a | [
"MIT"
] | null | null | null | 2018/11/2018_Day_11a.py | vScourge/Advent_of_Code | 91c743e20e1105d25bcc30da882677ce11c8092a | [
"MIT"
] | null | null | null | 2018/11/2018_Day_11a.py | vScourge/Advent_of_Code | 91c743e20e1105d25bcc30da882677ce11c8092a | [
"MIT"
] | null | null | null | """
--- Day 11: Chronal Charge ---
You watch the Elves and their sleigh fade into the distance as they head toward the North Pole.
Actually, you're the one fading. The falling sensation returns.
The low fuel warning light is illuminated on your wrist-mounted device. Tapping it once causes it to project a hologram of the situation: a 300x300 grid of fuel cells and their current power levels, some negative. You're not sure what negative power means in the context of time travel, but it can't be good.
Each fuel cell has a coordinate ranging from 1 to 300 in both the X (horizontal) and Y (vertical) direction. In X,Y notation, the top-left cell is 1,1, and the top-right cell is 300,1.
The interface lets you select any 3x3 square of fuel cells. To increase your chances of getting to your destination, you decide to choose the 3x3 square with the largest total power.
The power level in a given fuel cell can be found through the following process:
Find the fuel cell's rack ID, which is its X coordinate plus 10.
Begin with a power level of the rack ID times the Y coordinate.
Increase the power level by the value of the grid serial number (your puzzle input).
Set the power level to itself multiplied by the rack ID.
Keep only the hundreds digit of the power level (so 12345 becomes 3; numbers with no hundreds digit become 0).
Subtract 5 from the power level.
For example, to find the power level of the fuel cell at 3,5 in a grid with serial number 8:
The rack ID is 3 + 10 = 13.
The power level starts at 13 * 5 = 65.
Adding the serial number produces 65 + 8 = 73.
Multiplying by the rack ID produces 73 * 13 = 949.
The hundreds digit of 949 is 9.
Subtracting 5 produces 9 - 5 = 4.
So, the power level of this fuel cell is 4.
Here are some more example power levels:
Fuel cell at 122,79, grid serial number 57: power level -5.
Fuel cell at 217,196, grid serial number 39: power level 0.
Fuel cell at 101,153, grid serial number 71: power level 4.
Your goal is to find the 3x3 square which has the largest total power. The square must be entirely within the 300x300 grid. Identify this square using the X,Y coordinate of its top-left fuel cell. For example:
For grid serial number 18, the largest total 3x3 square has a top-left corner of 33,45 (with a total power of 29); these fuel cells appear in the middle of this 5x5 region:
-2 -4 4 4 4
-4 4 4 4 -5
4 3 3 4 -4
1 1 2 4 -3
-1 0 2 -5 -2
For grid serial number 42, the largest 3x3 square's top-left is 21,61 (with a total power of 30); they are in the middle of this region:
-3 4 2 2 2
-4 4 3 3 4
-5 3 3 4 -4
4 3 3 4 -3
3 3 3 -5 -1
What is the X,Y coordinate of the top-left fuel cell of the 3x3 square with the largest total power?
Your puzzle input is 3031.
"""
import sys
import time
import numpy
### CLASSES ###
### FUNCTIONS ###
def get_power_level( coords, serial_num ):
"""
Find the fuel cell's rack ID, which is its X coordinate plus 10.
Begin with a power level of the rack ID times the Y coordinate.
Increase the power level by the value of the grid serial number (your puzzle input).
Set the power level to itself multiplied by the rack ID.
Keep only the hundreds digit of the power level (so 12345 becomes 3; numbers with no hundreds digit become 0).
Subtract 5 from the power level.
For example, to find the power level of the fuel cell at 3,5 in a grid with serial number 8:
The rack ID is 3 + 10 = 13.
The power level starts at 13 * 5 = 65.
Adding the serial number produces 65 + 8 = 73.
Multiplying by the rack ID produces 73 * 13 = 949.
The hundreds digit of 949 is 9.
Subtracting 5 produces 9 - 5 = 4.
So, the power level of this fuel cell is 4.
Here are some more example power levels:
Fuel cell at 122,79, grid serial number 57: power level -5.
Fuel cell at 217,196, grid serial number 39: power level 0.
Fuel cell at 101,153, grid serial number 71: power level 4.
"""
rack_id = coords[ 0 ] + 10
power = int( '000' + str( ( rack_id * coords[ 1 ] + serial_num ) * rack_id )[ -3 ] ) - 5
return power
def get_square_power( grid, pos ):
power = 0
for y in range( 3 ):
for x in range( 3 ):
power += grid[ ( pos[ 0 ] + x, pos[ 1 ] + y ) ]
return power
### MAIN ###
if __name__ == "__main__":
"""
"""
# Populate grid with power levels
grid = numpy.zeros( ( 301, 301 ), dtype = numpy.int )
for y in range( 300 ):
for x in range( 300 ):
grid[ ( x+1, y+1 ) ] = get_power_level( (x+1,y+1), 3031 )
# Find square with most power
top_coords = None
top_power = 0
for y in range( 300 - 3 ):
for x in range( 300 - 3 ):
coords = (x+1,y+1)
power = get_square_power( grid, coords )
if power > top_power:
top_coords = coords
top_power = power
print( 'square coord with most power = {0}'.format( top_coords ) )
print( 'done' ) | 35.427536 | 308 | 0.695848 |
af1c2e0c03a8ddb66feba9b135f511e2e7976827 | 373 | py | Python | src/stream/protocol/serverbound/shutdown_serverbound_packet.py | dzikoysk/CefStream | 48843b6f4222b68b97237437bfde36910637736c | [
"Apache-2.0"
] | 5 | 2018-03-20T21:33:58.000Z | 2018-06-21T10:01:42.000Z | src/stream/protocol/serverbound/shutdown_serverbound_packet.py | dzikoysk/cefstream | 48843b6f4222b68b97237437bfde36910637736c | [
"Apache-2.0"
] | 2 | 2018-06-19T21:15:35.000Z | 2018-06-19T21:15:40.000Z | src/stream/protocol/serverbound/shutdown_serverbound_packet.py | dzikoysk/CefStream | 48843b6f4222b68b97237437bfde36910637736c | [
"Apache-2.0"
] | null | null | null | from overrides import overrides
from src.stream.protocol.packet import ServerboundPacket
from src.stream.protocol.packets import Packets
class ShutdownServerboundPacket(ServerboundPacket):
@overrides
def receive(self, cefstream, socket):
pass
@staticmethod
@overrides
def get_packet_id():
return Packets.SHUTDOWN_SERVERBOUND_PACKET
| 21.941176 | 56 | 0.764075 |
c92ab6e15e411051902e14353b7790816cb48799 | 106,491 | py | Python | raw_packet/Utils/base.py | Vladimir-Ivanov-Git/raw_packet | 78d27b3dc9532d27faa6e5d853c62bc9c8b21e71 | [
"MIT"
] | 146 | 2018-09-28T13:34:01.000Z | 2022-03-21T21:35:12.000Z | raw_packet/Utils/base.py | Vladimir-Ivanov-Git/raw_packet | 78d27b3dc9532d27faa6e5d853c62bc9c8b21e71 | [
"MIT"
] | 18 | 2019-06-05T17:59:08.000Z | 2021-12-22T10:26:18.000Z | raw_packet/Utils/base.py | Vladimir-Ivanov-Git/raw_packet | 78d27b3dc9532d27faa6e5d853c62bc9c8b21e71 | [
"MIT"
] | 26 | 2018-11-09T07:47:42.000Z | 2022-03-12T22:40:33.000Z | # region Description
"""
base.py: Base class for Raw-packet project (base)
Author: Vladimir Ivanov
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion
# region Import
from raw_packet.Utils.vendors import vendors_dictionary
try:
from platform import system, release, linux_distribution
except ImportError:
from platform import system, release
try:
from os import getuid
except ImportError:
from ctypes import windll
from os.path import dirname, abspath, isfile, join
try:
from pwd import getpwuid
except ModuleNotFoundError:
pass
from random import choice, randint
from socket import inet_ntoa
try:
from netifaces import interfaces, ifaddresses, gateways, AF_LINK, AF_INET, AF_INET6
except ModuleNotFoundError:
from socket import AF_INET, AF_INET6
from getmac import get_mac_address
from ifaddr import get_adapters
from netaddr import IPNetwork, IPAddress
from netaddr.core import AddrFormatError
from struct import pack, error
from ipaddress import IPv4Address, AddressValueError
from re import match, compile, search
import subprocess as sub
import psutil as ps
import socket as sock
from distro import linux_distribution
from prettytable import PrettyTable
from typing import Dict, List, Union
from paramiko import RSAKey, SSHClient, AutoAddPolicy, SSHException
from paramiko.ssh_exception import NoValidConnectionsError, AuthenticationException
from colorama import init, Fore, Style
from threading import Lock
# endregion
# region Authorship information
# Standard dunder metadata consumed by packaging and introspection tooling.
__author__ = 'Vladimir Ivanov'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = 'Vladimir Ivanov'
__email__ = 'ivanov.vladimir.mail@gmail.com'
__status__ = 'Development'
# endregion
# region Main class - Base
class Base:
# region Set variables
vendors: Dict[str, str] = vendors_dictionary
os_installed_packages_list = None
_lock: Lock = Lock()
_windows_mac_address_regex = compile(r'([0-9a-f]{2}-[0-9a-f]{2}-[0-9a-f]{2}-[0-9a-f]{2}-[0-9a-f]{2}-[0-9a-f]{2})')
_windows_adapters = None
_current_platform: Union[None, str] = None
_network_interfaces_multicast_macs: Dict[str, List[str]] = \
{'example-network-interface': ['33:33:00:00:00:02']}
_network_interfaces_settings: Dict[str, Dict[str, Union[None, bool, int, float, str, List[str]]]] = \
{'example-network-interface': {
'network-interface': 'example-network-interface',
'is-wireless': False,
'essid': 'AP',
'bssid': '12:34:56:78:90:ab',
'channel': 1,
'frequency': 2.4,
'mac-address': '12:34:56:78:90:ab',
'ipv4-address': '192.168.0.1',
'ipv6-link-address': 'fe80::1234:5678:90ab:cdef',
'ipv6-global-address': '2001:4860:4860::8888',
'ipv6-global-addresses': ['2001:4860:4860::8888', '2001:4860:4860::8844'],
'ipv4-netmask': '255.255.255.0',
'ipv4-network': '192.168.0.0/24',
'first-ipv4-address': '192.168.0.1',
'second-ipv4-address': '192.168.0.2',
'penultimate-ipv4-address': '192.168.0.253',
'last-ipv4-address': '192.168.0.254',
'ipv4-broadcast': '192.168.0.255',
'ipv4-gateway': '192.168.0.254',
'ipv6-gateway': 'fe80::1234:5678:8765:4321'
}}
# endregion
# region Init
def __init__(self,
admin_only: bool = True,
available_platforms: List[str] = ['Linux', 'Darwin', 'Windows']) -> None:
"""
Init
"""
# Check user is admin/root
if admin_only:
self.check_user()
# Check platform
self.check_platform(available_platforms=available_platforms)
# If current platform is Windows get network interfaces settings
if self.get_platform().startswith('Windows'):
self._windows_adapters = get_adapters()
init(convert=True)
self.cINFO: str = Style.BRIGHT + Fore.BLUE
self.cERROR: str = Style.BRIGHT + Fore.RED
self.cSUCCESS: str = Style.BRIGHT + Fore.GREEN
self.cWARNING: str = Style.BRIGHT + Fore.YELLOW
self.cEND: str = Style.RESET_ALL
self.c_info: str = self.cINFO + '[*]' + self.cEND + ' '
self.c_error: str = self.cERROR + '[-]' + self.cEND + ' '
self.c_success: str = self.cSUCCESS + '[+]' + self.cEND + ' '
self.c_warning: str = self.cWARNING + '[!]' + self.cEND + ' '
self.lowercase_letters: str = 'abcdefghijklmnopqrstuvwxyz'
self.uppercase_letters: str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.digits: str = '0123456789'
# endregion
# region Output functions
    def get_banner(self, script_name: Union[None, str] = None) -> str:
        """
        Build the colored ASCII-art project banner
        :param script_name: Optional script name centered below the banner (default: None)
        :return: String of colored banner
        """
        # The art is green (cSUCCESS), the project URL line is yellow (cWARNING).
        banner: str = \
            self.cSUCCESS + \
            " _ _ \n" + \
            " _ __ __ ___ __ _ __ __ _ ___| | _____| |_ \n" + \
            "| '__/ _` \ \ /\ / /___ | '_ \ / _` |/ __| |/ / _ \ __|\n" + \
            "| | | (_| |\ V V /|___|| |_) | (_| | (__| < __/ |_ \n" + \
            "|_| \__,_| \_/\_/ | .__/ \__,_|\___|_|\_\___|\__|\n" + \
            " |_| v" + __version__ + "\n" + \
            self.cEND + self.cWARNING + \
            " https://raw-packet.github.io/\r\n" + self.cEND
        # Center the script name inside the banner's 55-column width.
        if script_name is not None:
            banner += '\n' + ' ' * (int((55 - len(script_name)) / 2)) + self.cINFO + script_name + self.cEND + '\n'
        return banner
def print_banner(self, script_name: Union[None, str] = None) -> None:
"""
Print colored banner in console
:return: None
"""
print(self.get_banner(script_name))
def _color_print(self, color: str = 'blue', *strings: str) -> None:
"""
Print colored text in console
:param color: Set color: blue, red, orange, green (default: blue)
:param strings: Strings for printing in console
:return: None
"""
result_output_string: str = ''
if color == 'blue':
result_output_string += self.c_info
elif color == 'red':
result_output_string += self.c_error
elif color == 'orange':
result_output_string += self.c_warning
elif color == 'green':
result_output_string += self.c_success
else:
result_output_string += self.c_info
for index in range(len(strings)):
if index % 2 == 0:
result_output_string += strings[index]
else:
if color == 'blue':
result_output_string += self.cINFO
if color == 'red':
result_output_string += self.cERROR
if color == 'orange':
result_output_string += self.cWARNING
if color == 'green':
result_output_string += self.cSUCCESS
result_output_string += strings[index] + self.cEND
self._lock.acquire()
print(result_output_string)
self._lock.release()
def _color_text(self, color: str = 'blue', string: str = '') -> str:
"""
Make colored string
:param color: Set color: blue, red, orange, green (default: blue)
:param string: Input string (example: 'test')
:return: Colored string (example: '\033[1;34mtest\033[0m')
"""
if color == 'blue':
return self.cINFO + string + self.cEND
elif color == 'red':
return self.cERROR + string + self.cEND
elif color == 'orange':
return self.cWARNING + string + self.cEND
elif color == 'green':
return self.cSUCCESS + string + self.cEND
else:
return self.cINFO + string + self.cEND
def print_info(self, *strings: str) -> None:
"""
Print informational text in console
:param strings: Strings for printing in console
:return: None
"""
self._color_print('blue', *strings)
def print_error(self, *strings: str) -> None:
"""
Print error text in console
:param strings: Strings for printing in console
:return: None
"""
self._color_print('red', *strings)
def print_warning(self, *strings: str) -> None:
"""
Print warning text in console
:param strings: Strings for printing in console
:return: None
"""
self._color_print('orange', *strings)
def print_success(self, *strings: str) -> None:
"""
Print success text in console
:param strings: Strings for printing in console
:return: None
"""
self._color_print('green', *strings)
def info_text(self, text: str) -> str:
"""
Make information text
:param text: Input string (example: 'test')
:return: Colored string (example: '\033[1;34mtest\033[0m')
"""
return self._color_text('blue', text)
def error_text(self, text: str) -> str:
"""
Make error text
:param text: Input string (example: 'test')
:return: Colored string (example: '\033[1;31mtest\033[0m')
"""
return self._color_text('red', text)
def warning_text(self, text: str) -> str:
"""
Make warning text
:param text: Input string (example: 'test')
:return: Colored string (example: '\033[1;32mtest\033[0m')
"""
return self._color_text('orange', text)
def success_text(self, text: str) -> str:
"""
Make success text
:param text: Input string (example: 'test')
:return: Colored string (example: '\033[1;33mtest\033[0m')
"""
return self._color_text('green', text)
# endregion
# region Check platform and user functions
def get_platform(self) -> str:
"""
Get your platform
:return: Platform string (example: 'Windows 10' or 'Darwin 19.0.0' or 'Linux Ubuntu 18.04')
"""
if self._current_platform is None:
linux_dist = linux_distribution()
try:
assert linux_dist[0] != '' and linux_dist[1] != '' and linux_dist[0] != system()
self._current_platform = str(system()) + ' ' + str(linux_dist[0]) + ' ' + str(linux_dist[1])
except AssertionError:
self._current_platform = str(system()) + ' ' + str(release())
return self._current_platform
def check_platform(self,
available_platforms: List[str] = ['Linux', 'Darwin', 'Windows'],
exit_on_failure: bool = True,
exit_code: int = 1,
quiet: bool = False) -> bool:
"""
Check Python version and OS
:param available_platforms: Available Platforms list (example: ['Linux', 'Darwin', 'Windows'])
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 1)
:param quiet: Quiet mode, if True no console output (default: False)
:return: True if OS is Linux or False if not
"""
for available_platform in available_platforms:
if available_platform in self.get_platform():
return True
if not quiet:
print('This script can run only on: ' + ' and '.join(available_platforms))
print('Your platform: ' + self.get_platform() + ' not supported!')
if exit_on_failure:
exit(exit_code)
return False
@staticmethod
def check_user(exit_on_failure: bool = True,
exit_code: int = 2,
quiet: bool = False) -> bool:
"""
Check user privileges
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 2)
:param quiet: Quiet mode, if True no console output (default: False)
:return: True if user is root or False if not
"""
try:
if getuid() != 0:
if not quiet:
print('Only root can run this script!')
print('User: ' + str(getpwuid(getuid())[0]) + ' can not run this script!')
if exit_on_failure:
exit(exit_code)
return False
except NameError:
if windll.shell32.IsUserAnAdmin() == 0:
if not quiet:
print('Only Administartor can run this script!')
if exit_on_failure:
exit(exit_code)
return False
return True
# endregion
# region Pack functions
@staticmethod
def pack8(data: Union[int, str, bytes],
exit_on_failure: bool = True,
exit_code: int = 3,
quiet: bool = False) -> Union[None, bytes]:
"""
Pack 8 bit data
:param data: Input data
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 3)
:param quiet: Quiet mode, if True no console output (default: False)
:return: Packed 8 bit data
"""
try:
return pack('B', data)
except error:
if not quiet:
print('Bad value for 8 bit pack: ' + str(data))
if exit_on_failure:
exit(exit_code)
return None
@staticmethod
def pack16(data: Union[int, str, bytes],
exit_on_failure: bool = True,
exit_code: int = 4,
quiet: bool = False) -> Union[None, bytes]:
"""
Pack 16 bit data
:param data: Input data
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 4)
:param quiet: Quiet mode, if True no console output (default: False)
:return: Packed 16 bit data
"""
try:
return pack('!H', data)
except error:
if not quiet:
print('Bad value for 16 bit pack: ' + str(data))
if exit_on_failure:
exit(exit_code)
return None
@staticmethod
def pack32(data: Union[int, str, bytes],
exit_on_failure: bool = True,
exit_code: int = 5,
quiet: bool = False) -> Union[None, bytes]:
"""
Pack 32 bit data
:param data: Input data
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 5)
:param quiet: Quiet mode, if True no console output (default: False)
:return: Packed 32 bit data
"""
try:
return pack('!I', data)
except error:
if not quiet:
print('Bad value for 32 bit pack: ' + str(data))
if exit_on_failure:
exit(exit_code)
return None
@staticmethod
def pack64(data: Union[int, str, bytes],
exit_on_failure: bool = True,
exit_code: int = 6,
quiet: bool = False) -> Union[None, bytes]:
"""
Pack 64 bit data
:param data: Input data
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 6)
:param quiet: Quiet mode, if True no console output (default: False)
:return: Packed 64 bit data
"""
try:
return pack('!Q', data)
except error:
if not quiet:
print('Bad value for 64 bit pack: ' + str(data))
if exit_on_failure:
exit(exit_code)
return None
# endregion
# region Network interface functions
    def list_of_network_interfaces(self) -> Union[None, List[str]]:
        """
        Get list of network interfaces
        :return: list of network interfaces (example: ['lo', 'eth0'])
        """
        if self.get_platform().startswith('Windows'):
            # Windows: collect unique adapter names from the cached adapter list
            # (presumably populated via the ifaddr package — TODO confirm)
            result_list: List[str] = list()
            for adapter in self._windows_adapters:
                for ip in adapter.ips:
                    if ip.nice_name not in result_list:
                        result_list.append(ip.nice_name)
            return result_list
        else:
            # UNIX-like: delegate to netifaces.interfaces()
            return interfaces()
    def list_of_wireless_network_interfaces(self) -> List[str]:
        """
        Get list of wireless network interfaces
        :return: list of wireless network interfaces (example: ['wlan0', 'wlan1'])
        """
        try:
            wireless_network_interfaces: List[str] = list()
            current_platform: str = self.get_platform()
            # Mac OS
            if current_platform.startswith('Darwin'):
                # Wireless services appear as 'Wi-Fi' entries with a 'Device: <name>)' suffix
                interfaces_info: sub.CompletedProcess = \
                    sub.run(['networksetup -listnetworkserviceorder'], shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
                interfaces_info: str = interfaces_info.stdout.decode('utf-8')
                interfaces_info: List[str] = interfaces_info.splitlines()
                for output_line in interfaces_info:
                    if 'Wi-Fi' in output_line and 'Device: ' in output_line:
                        search_result = search(r'Device: (?P<interface_name>[a-zA-Z0-9]{2,16})\)', output_line)
                        if search_result is not None:
                            wireless_network_interfaces.append(search_result.group('interface_name'))
            # Linux
            elif current_platform.startswith('Linux'):
                # iwconfig reports wireless interfaces with an 'IEEE 802.11' marker
                interfaces_info: sub.CompletedProcess = \
                    sub.run(['iwconfig'], shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
                interfaces_info: str = interfaces_info.stdout.decode('utf-8')
                interfaces_info: List[str] = interfaces_info.splitlines()
                for output_line in interfaces_info:
                    if 'IEEE 802.11' in output_line:
                        search_result = search(r'^(?P<interface_name>[a-zA-Z0-9]{2,32}) +IEEE', output_line)
                        if search_result is not None:
                            wireless_network_interfaces.append(search_result.group('interface_name'))
            # Windows
            elif current_platform.startswith('Windows'):
                # Parse 'Name : <interface>' rows from netsh output (stdout + stderr)
                netsh_command: sub.Popen = \
                    sub.Popen('netsh wlan show interfaces', shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
                netsh_command_output, netsh_command_error = netsh_command.communicate()
                interfaces_info: str = netsh_command_output.decode('utf-8') + \
                                       netsh_command_error.decode('utf-8')
                interfaces_info: List[str] = interfaces_info.splitlines()
                for output_line in interfaces_info:
                    if 'Name' in output_line:
                        search_result = search(r'^ +Name +: (?P<interface_name>.*)$', output_line)
                        if search_result is not None:
                            wireless_network_interfaces.append(search_result.group('interface_name'))
            # Other
            else:
                pass
            return wireless_network_interfaces
        except AssertionError:
            # NOTE(review): the try block above contains no assert statement, so
            # this handler looks unreachable — confirm whether it is a leftover
            return list()
    def network_interface_selection(self,
                                    interface_name: Union[None, str] = None,
                                    exclude_interface: Union[None, str] = None,
                                    only_wireless: bool = False,
                                    message: Union[None, str] = None) -> str:
        """
        Select network interface: validate an explicit name, auto-pick a single
        candidate, or interactively ask the user to choose from a table.
        Exits the process on invalid input or when no interface is available.
        :param interface_name: Network interface name (example: 'eth0'; default: None)
        :param exclude_interface: Exclude network interface from list of interfaces (example: 'eth1'; default: None)
        :param only_wireless: Select network interface only from wireless interfaces (default: False)
        :param message: Print message before select network interface from table (example: 'Select network interface from table: ')
        :return: Network interface name (example: 'eth0')
        """
        # 1-based row counter for the interactive selection table
        network_interface_index: int = 1
        if not only_wireless:
            available_network_interfaces: List[str] = self.list_of_network_interfaces()
        else:
            available_network_interfaces: List[str] = self.list_of_wireless_network_interfaces()
        if exclude_interface is not None:
            available_network_interfaces.remove(exclude_interface)
        if interface_name is not None:
            # Explicit interface requested: validate it and warm the settings cache
            if interface_name in available_network_interfaces:
                self.get_interface_settings(interface_name=interface_name, required_parameters=[], quiet=True)
                return interface_name
            else:
                if not only_wireless:
                    self.print_error('Network interface: ', interface_name, ' does not exist!')
                else:
                    self.print_error('Wireless network interface: ', interface_name, ' does not exist!')
                exit(1)
        else:
            # Never offer the loopback interface for selection
            if 'lo' in available_network_interfaces:
                available_network_interfaces.remove('lo')
            if len(available_network_interfaces) > 1:
                # Multiple candidates: render a table and ask the user to pick one
                if message is not None:
                    self.print_warning(message)
                interfaces_pretty_table = PrettyTable([self.info_text('Index'),
                                                       self.info_text('Interface name'),
                                                       self.info_text('MAC address'),
                                                       self.info_text('IPv4 address'),
                                                       self.info_text('IPv6 link address')])
                for network_interface in available_network_interfaces:
                    network_interface_settings = self.get_interface_settings(interface_name=network_interface,
                                                                             required_parameters=[], quiet=True)
                    network_interface_mac_address: Union[None, str] = \
                        network_interface_settings['mac-address']
                    if network_interface_mac_address is None:
                        network_interface_mac_address = 'None'
                    network_interface_ipv4_address: Union[None, str] = \
                        network_interface_settings['ipv4-address']
                    if network_interface_ipv4_address is None:
                        network_interface_ipv4_address = 'None'
                    network_interface_ipv6_link_address: Union[None, str] = \
                        network_interface_settings['ipv6-link-address']
                    if network_interface_ipv6_link_address is None:
                        network_interface_ipv6_link_address = 'None'
                    interfaces_pretty_table.add_row([str(network_interface_index),
                                                     network_interface,
                                                     network_interface_mac_address,
                                                     network_interface_ipv4_address,
                                                     network_interface_ipv6_link_address])
                    network_interface_index += 1
                print(interfaces_pretty_table)
                # After the loop the counter is one past the last row
                network_interface_index -= 1
                print(self.c_warning + 'Select network interface from range (1-' +
                      str(network_interface_index) + '): ', end='')
                current_network_interface_index = input()
                if not current_network_interface_index.isdigit():
                    self.print_error('Your input data: ', current_network_interface_index, ' is not digit!')
                    exit(1)
                else:
                    current_network_interface_index = int(current_network_interface_index)
                if any([int(current_network_interface_index) < 1,
                        int(current_network_interface_index) > network_interface_index]):
                    self.print_error('Your number: ', current_network_interface_index,
                                     ' is not within range (', '1-' + str(network_interface_index), ')')
                    exit(1)
                current_network_interface = ''
                try:
                    # Table rows are 1-based; the list is 0-based
                    current_network_interface = str(available_network_interfaces[current_network_interface_index - 1])
                # NOTE(review): bare except hides the real error (likely IndexError) — consider narrowing
                except:
                    self.print_error('This network interface has some problem!')
                    exit(1)
                if not only_wireless:
                    self.print_info('Your choose network interface: ', current_network_interface)
                else:
                    self.print_info('Your choose wireless network interface: ', current_network_interface)
                return current_network_interface
            if len(available_network_interfaces) == 1:
                # Single candidate: warm the settings cache and return it
                self.get_interface_settings(interface_name=available_network_interfaces[0],
                                            required_parameters=[], quiet=True)
                if not only_wireless:
                    self.print_info('You have only one network interface: ', available_network_interfaces[0])
                else:
                    self.print_info('You have only one wireless network interface: ', available_network_interfaces[0])
                return available_network_interfaces[0]
            if len(available_network_interfaces) == 0:
                if not only_wireless:
                    self.print_error('Network interfaces not found!')
                else:
                    self.print_error('Wireless network interfaces not found!')
                exit(1)
    def check_network_interface_is_wireless(self, interface_name: str = 'wlan0') -> bool:
        """
        Check network interface is wireless
        :param interface_name: Network interface name (example: 'wlan0')
        :return: True or False
        """
        try:
            current_platform: str = self.get_platform()
            # Mac OS
            if current_platform.startswith('Darwin'):
                # Wireless services are listed as 'Wi-Fi' by networksetup
                interface_info: sub.CompletedProcess = \
                    sub.run(['networksetup -listnetworkserviceorder | grep ' + interface_name],
                            shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
                interface_info: str = interface_info.stdout.decode('utf-8')
                assert 'Wi-Fi' in interface_info, 'Is not wireless interface!'
                return True
            # Linux
            elif current_platform.startswith('Linux'):
                # iwconfig prints 'no wireless extensions' for wired interfaces
                interface_info: sub.CompletedProcess = \
                    sub.run(['iwconfig ' + interface_name],
                            shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
                interface_info: str = interface_info.stdout.decode('utf-8')
                assert 'no wireless extensions' not in interface_info, 'Is not wireless interface!'
                return True
            # Windows
            elif current_platform.startswith('Windows'):
                # NOTE(review): this checks netsh output globally and does not filter
                # by interface_name — confirm intended behavior
                netsh_command: sub.Popen = \
                    sub.Popen('netsh wlan show interfaces', shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
                netsh_command_output, netsh_command_error = netsh_command.communicate()
                interfaces_info: str = netsh_command_output.decode('utf-8') + \
                                       netsh_command_error.decode('utf-8')
                assert 'no wireless extensions' not in interfaces_info, 'Is not wireless interface!'
                return True
            # Other
            else:
                return False
        except AssertionError:
            return False
# @staticmethod
# def get_netiface_essid(interface_name):
# try:
# wifi = Wireless(interface_name)
# essid = wifi.getEssid()
# except:
# essid = None
# return essid
#
# @staticmethod
# def get_netiface_frequency(interface_name):
# try:
# wifi = Wireless(interface_name)
# frequency = wifi.getFrequency()
# except:
# frequency = 0
# return frequency
    def get_interface_settings(self,
                               interface_name: str = 'eth0',
                               required_parameters: List[str] = ['mac-address'],
                               quiet: bool = True) -> Dict[str, Union[None, str, List[str]]]:
        """
        Get network interface settings
        :param interface_name: Network interface name (default: 'eth0')
        :param required_parameters: Required Network interface parameters list (default: ['mac-address'])
        :param quiet: Quiet mode, if True no console output (default: True)
        :return: Network interface settings dictionary
                 (example: {'network-interface': 'example-network-interface',
                            'is-wireless': False,
                            'essid': 'AP',
                            'bssid': '12:34:56:78:90:ab',
                            'channel': 1,
                            'mac-address': '12:34:56:78:90:ab',
                            'ipv4-address': '192.168.0.1',
                            'ipv6-link-address': 'fe80::1234:5678:90ab:cdef',
                            'ipv6-global-address': '2001:4860:4860::8888',
                            'ipv6-global-addresses': ['2001:4860:4860::8888', '2001:4860:4860::8844'],
                            'ipv4-netmask': '255.255.255.0',
                            'ipv4-network': '192.168.0.0/24',
                            'first-ipv4-address': '192.168.0.1',
                            'second-ipv4-address': '192.168.0.2',
                            'penultimate-ipv4-address': '192.168.0.253',
                            'last-ipv4-address': '192.168.0.254',
                            'ipv4-broadcast': '192.168.0.255',
                            'ipv4-gateway': '192.168.0.254',
                            'ipv6-gateway': 'fe80::1234:5678:8765:4321'})
        """
        # NOTE(review): required_parameters defaults to a mutable list literal;
        # it is only read here, so this is harmless, but a tuple would be safer.
        # Settings are gathered once per interface and cached in
        # self._network_interfaces_settings.
        if interface_name not in self._network_interfaces_settings.keys():
            wireless_interface_settings: Dict[str, Union[None, int, float, str]] = \
                self.get_wireless_interface_settings(interface_name=interface_name)
            self._network_interfaces_settings[interface_name]: \
                Dict[str, Union[None, bool, int, float, str, List[str]]] = {
                'network-interface': interface_name,
                'is-wireless': self.check_network_interface_is_wireless(interface_name=interface_name),
                'essid': wireless_interface_settings['essid'],
                'bssid': wireless_interface_settings['bssid'],
                'channel': wireless_interface_settings['channel'],
                'mac-address': self.get_interface_mac_address(interface_name=interface_name,
                                                              exit_on_failure=False,
                                                              quiet=quiet),
                'ipv4-address': self.get_interface_ip_address(interface_name=interface_name,
                                                              exit_on_failure=False,
                                                              quiet=quiet),
                'ipv6-link-address': self.get_interface_ipv6_link_address(interface_name=interface_name,
                                                                          exit_on_failure=False,
                                                                          quiet=quiet),
                'ipv6-global-address': self.get_interface_ipv6_glob_address(interface_name=interface_name),
                'ipv6-global-addresses': self.get_interface_ipv6_glob_addresses(interface_name=interface_name),
                'ipv4-netmask': self.get_interface_netmask(interface_name=interface_name,
                                                           exit_on_failure=False,
                                                           quiet=quiet),
                'ipv4-network': self.get_interface_network(interface_name=interface_name,
                                                           exit_on_failure=False,
                                                           quiet=quiet),
                'first-ipv4-address': self.get_first_ip_on_interface(interface_name=interface_name,
                                                                     exit_on_failure=False,
                                                                     quiet=quiet),
                'second-ipv4-address': self.get_second_ip_on_interface(interface_name=interface_name,
                                                                       exit_on_failure=False,
                                                                       quiet=quiet),
                'penultimate-ipv4-address': self.get_penultimate_ip_on_interface(interface_name=interface_name,
                                                                                 exit_on_failure=False,
                                                                                 quiet=quiet),
                'last-ipv4-address': self.get_last_ip_on_interface(interface_name=interface_name,
                                                                   exit_on_failure=False,
                                                                   quiet=quiet),
                'ipv4-broadcast': self.get_interface_broadcast(interface_name=interface_name,
                                                               exit_on_failure=False,
                                                               quiet=quiet),
                'ipv4-gateway': self.get_interface_ipv4_gateway(interface_name=interface_name,
                                                                exit_on_failure=False,
                                                                quiet=quiet),
                'ipv6-gateway': self.get_interface_ipv6_gateway(interface_name=interface_name,
                                                                exit_on_failure=False,
                                                                quiet=quiet)}
        try:
            # Fail loudly (and exit) if any required parameter was not detected
            for required_parameter in required_parameters:
                if required_parameter in self._network_interfaces_settings[interface_name].keys():
                    assert self._network_interfaces_settings[interface_name][required_parameter] is not None, \
                        'Network interface: ' + self.error_text(interface_name) + \
                        ' does not have: ' + self.error_text(required_parameter)
            return self._network_interfaces_settings[interface_name]
        except AssertionError as Error:
            self.print_error(Error.args[0])
            exit(1)
    def get_wireless_interface_settings(self,
                                        interface_name: str = 'wlan0') -> Dict[str, Union[None, int, str]]:
        """
        Get wireless settings (ESSID, BSSID, channel) of the network interface
        :param interface_name: Wireless network interface name (default: 'wlan0')
        :return: Dictionary with keys 'essid', 'bssid' and 'channel'
                 (values are None when the interface is not wireless or not associated)
        """
        # Serve the values from the settings cache when available
        if interface_name in self._network_interfaces_settings.keys():
            return {
                'essid': self._network_interfaces_settings[interface_name]['essid'],
                'bssid': self._network_interfaces_settings[interface_name]['bssid'],
                'channel': self._network_interfaces_settings[interface_name]['channel']
            }
        else:
            result: Dict[str, Union[None, int, str]] = {
                'essid': None,
                'bssid': None,
                'channel': None
            }
            if interface_name in self.list_of_wireless_network_interfaces():
                # region Linux
                if self.get_platform().startswith('Linux'):
                    # iwgetid: -r raw ESSID, -a AP MAC (BSSID), -c channel
                    result['essid']: str = str(sub.run(['iwgetid -r ' + interface_name],
                                                       shell=True, stdout=sub.PIPE).stdout.decode('utf-8').rstrip())
                    result['bssid']: str = str(sub.run(['iwgetid -a -r ' + interface_name],
                                                       shell=True, stdout=sub.PIPE).stdout.decode('utf-8').rstrip())
                    result['channel']: int = int(sub.run(['iwgetid -c -r ' + interface_name],
                                                         shell=True, stdout=sub.PIPE).stdout.decode('utf-8').rstrip())
                # endregion
                # region Windows
                elif self.get_platform().startswith('Windows'):
                    # Parse netsh output into per-interface settings dictionaries
                    netsh_command: sub.Popen = \
                        sub.Popen('netsh wlan show interfaces', shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
                    netsh_command_output, netsh_command_error = netsh_command.communicate()
                    interfaces_info: str = netsh_command_output.decode('utf-8') + netsh_command_error.decode('utf-8')
                    interfaces_info: List[str] = interfaces_info.splitlines()
                    interface_settings: Dict[str, Dict[str, Union[None, int, str]]] = dict()
                    current_interface: Union[None, str] = None
                    for output_line in interfaces_info:
                        if 'Name' in output_line:
                            search_result = search(r'^ +Name +: (?P<interface_name>.*)$', output_line)
                            if search_result is not None:
                                current_interface = search_result.group('interface_name')
                                interface_settings[current_interface]: Dict[str, Union[None, int, str]] = {
                                    'essid': None,
                                    'bssid': None,
                                    'channel': None
                                }
                        if ' SSID' in output_line:
                            search_result = search(r'^ +SSID +: (?P<essid>.*)$', output_line)
                            if search_result is not None:
                                interface_settings[current_interface]['essid']: str = \
                                    str(search_result.group('essid'))
                        if ' BSSID' in output_line:
                            search_result = search(r'^ +BSSID +: (?P<bssid>.*)$', output_line)
                            if search_result is not None:
                                interface_settings[current_interface]['bssid']: str = \
                                    str(search_result.group('bssid'))
                        if ' Channel' in output_line:
                            search_result = search(r'^ +Channel +: (?P<channel>.*)$', output_line)
                            if search_result is not None:
                                interface_settings[current_interface]['channel']: int = \
                                    int(search_result.group('channel'))
                        if 'Hosted network status' in output_line:
                            break
                    # NOTE(review): raises KeyError if interface_name never appeared
                    # in the netsh output — confirm whether that can happen here
                    result = interface_settings[interface_name]
                # endregion
            return result
    def get_interface_mac_address(self,
                                  interface_name: str = 'eth0',
                                  exit_on_failure: bool = True,
                                  exit_code: int = 7,
                                  quiet: bool = False) -> Union[None, str]:
        """
        Get MAC address of the network interface
        :param interface_name: Network interface name (default: 'eth0')
        :param exit_on_failure: Exit in case of error (default: True)
        :param exit_code: Set exit code integer (default: 7)
        :param quiet: Quiet mode, if True no console output (default: False)
        :return: MAC address string (example: '01:23:45:67:89:0a') or None in case of error
        """
        # Serve the cached value when interface settings were gathered earlier
        if interface_name in self._network_interfaces_settings.keys():
            if self._network_interfaces_settings[interface_name]['mac-address'] is not None:
                return self._network_interfaces_settings[interface_name]['mac-address']
        try:
            # netifaces lookup; NameError means ifaddresses/AF_LINK is not imported
            # on this platform and get_mac_address is used instead — TODO confirm
            # the conditional import strategy at the top of the file
            return str(ifaddresses(interface_name)[AF_LINK][0]['addr'])
        except NameError:
            return get_mac_address(interface=interface_name)
        except ValueError:
            pass
        except KeyError:
            pass
        if not quiet:
            self.print_error('Network interface: ', interface_name, ' does not have MAC address!')
        if exit_on_failure:
            exit(exit_code)
        return None
    def get_interface_ip_address(self,
                                 interface_name: str = 'eth0',
                                 exit_on_failure: bool = True,
                                 exit_code: int = 8,
                                 quiet: bool = False) -> Union[None, str]:
        """
        Get IPv4 address of the network interface
        :param interface_name: Network interface name (default: 'eth0')
        :param exit_on_failure: Exit in case of error (default: True)
        :param exit_code: Set exit code integer (default: 8)
        :param quiet: Quiet mode, if True no console output (default: False)
        :return: IPv4 address string (example: '192.168.1.1') or None in case of error
        """
        # Serve the cached value when interface settings were gathered earlier
        if interface_name in self._network_interfaces_settings.keys():
            if self._network_interfaces_settings[interface_name]['ipv4-address'] is not None:
                return self._network_interfaces_settings[interface_name]['ipv4-address']
        try:
            if self.get_platform().startswith('Windows'):
                # Windows: first IPv4 entry of the matching adapter
                for adapter in self._windows_adapters:
                    for ip in adapter.ips:
                        if ip.nice_name == interface_name and ip.is_IPv4:
                            return ip.ip
                return None
            else:
                # UNIX-like: first IPv4 address reported by netifaces
                return str(ifaddresses(interface_name)[AF_INET][0]['addr'])
        except ValueError:
            pass
        except KeyError:
            pass
        if not quiet:
            self.print_error('Network interface: ', interface_name, ' does not have IP address!')
        if exit_on_failure:
            exit(exit_code)
        return None
    def get_interface_ipv6_address(self,
                                   interface_name: str = 'eth0',
                                   address_index: int = 0,
                                   exit_on_failure: bool = False,
                                   exit_code: int = 9,
                                   quiet: bool = False) -> Union[None, str]:
        """
        Get IPv6 address of the network interface
        :param interface_name: Network interface name (default: 'eth0')
        :param address_index: Index of IPv6 address (default: 0)
        :param exit_on_failure: Exit in case of error (default: False)
        :param exit_code: Set exit code integer (default: 9)
        :param quiet: Quiet mode, if True no console output (default: False)
        :return: IPv6 address string (example: 'fd00::1') or None in case of error
        """
        try:
            if self.get_platform().startswith('Windows'):
                # Windows: collect all IPv6 entries of the adapter, then index
                ipv6_addresses: List[str] = list()
                for adapter in self._windows_adapters:
                    for ip in adapter.ips:
                        if ip.nice_name == interface_name and ip.is_IPv6:
                            ipv6_addresses.append(ip.ip[0])
                return ipv6_addresses[address_index]
            else:
                # UNIX-like: netifaces may append a '%<iface>' zone suffix — strip it
                ipv6_address = str(ifaddresses(interface_name)[AF_INET6][address_index]['addr'])
                ipv6_address = ipv6_address.replace('%' + interface_name, '', 1)
        except NameError:
            # ifaddresses/AF_INET6 not imported on this platform; fall back to loopback
            ipv6_address = '::1'
        except IndexError:
            # No IPv6 address with this index
            ipv6_address = None
        except ValueError:
            ipv6_address = None
        except KeyError:
            ipv6_address = None
        if ipv6_address is None:
            if not quiet:
                self.print_error('Network interface: ', interface_name,
                                 ' does not have IPv6 address with index: ', str(address_index))
            if exit_on_failure:
                exit(exit_code)
        return ipv6_address
    def get_interface_ipv6_link_address(self,
                                        interface_name: str = 'eth0',
                                        exit_on_failure: bool = True,
                                        exit_code: int = 10,
                                        quiet: bool = False) -> Union[None, str]:
        """
        Get IPv6 link local address of the network interface
        :param interface_name: Network interface name (default: 'eth0')
        :param exit_on_failure: Exit in case of error (default: True)
        :param exit_code: Set exit code integer (default: 10)
        :param quiet: Quiet mode, if True no console output (default: False)
        :return: IPv6 link local address string (example: 'fe80::1') or None in case of error
        """
        # Serve the cached value when interface settings were gathered earlier
        if interface_name in self._network_interfaces_settings.keys():
            if self._network_interfaces_settings[interface_name]['ipv6-link-address'] is not None:
                return self._network_interfaces_settings[interface_name]['ipv6-link-address']
        if interface_name == 'lo':
            return '::1'
        # Scan the first 10 IPv6 addresses looking for the link local one
        for address_index in range(0, 10, 1):
            ipv6_address = self.get_interface_ipv6_address(interface_name=interface_name,
                                                           address_index=address_index,
                                                           exit_on_failure=exit_on_failure,
                                                           exit_code=exit_code,
                                                           quiet=quiet)
            try:
                # IPv6 link local address starts with: 'fe80::'
                if ipv6_address.startswith('fe80::'):
                    return ipv6_address
            except AttributeError:
                # ipv6_address is None: no address at this index, give up
                if not quiet:
                    self.print_error('Network interface: ', interface_name, ' does not have IPv6 link local address!')
                if exit_on_failure:
                    exit(exit_code)
                return None
        return None
def get_interface_ipv6_glob_address(self,
interface_name: str = 'eth0') -> Union[None, str]:
"""
Get IPv6 global address of the network interface
:param interface_name: Network interface name (default: 'eth0')
:return: IPv6 global address string (example: 'fd00::1') or None in case of error
"""
if interface_name in self._network_interfaces_settings.keys():
if self._network_interfaces_settings[interface_name]['ipv6-global-address'] is not None:
return self._network_interfaces_settings[interface_name]['ipv6-global-address']
address_index: int = 0
ipv6_address: Union[None, str] = self.get_interface_ipv6_address(interface_name=interface_name,
address_index=address_index,
exit_on_failure=False,
quiet=True)
while ipv6_address is not None:
# IPv6 link local address starts with: 'fe80::'
if not ipv6_address.startswith('fe80::'):
return ipv6_address
address_index += 1
ipv6_address: Union[None, str] = self.get_interface_ipv6_address(interface_name=interface_name,
address_index=address_index,
exit_on_failure=False,
quiet=True)
return None
def get_interface_ipv6_glob_addresses(self,
interface_name: str = 'eth0') -> List[str]:
"""
Get IPv6 global addresses list of the network interface
:param interface_name: Network interface name (default: 'eth0')
:return: IPv6 global addresses list (example: ['fd00::1', 'fd00::2'])
"""
if interface_name in self._network_interfaces_settings.keys():
if self._network_interfaces_settings[interface_name]['ipv6-global-addresses'] is not None:
return self._network_interfaces_settings[interface_name]['ipv6-global-addresses']
ipv6_addresses: List[str] = list()
address_index: int = 0
ipv6_address: Union[None, str] = self.get_interface_ipv6_address(interface_name=interface_name,
address_index=address_index,
exit_on_failure=False,
quiet=True)
while ipv6_address is not None:
# IPv6 link local address starts with: 'fe80::'
if not ipv6_address.startswith('fe80::'):
ipv6_addresses.append(ipv6_address)
address_index += 1
ipv6_address: Union[None, str] = self.get_interface_ipv6_address(interface_name=interface_name,
address_index=address_index,
exit_on_failure=False,
quiet=True)
return ipv6_addresses
def make_ipv6_link_address(self,
mac_address: str = '01:23:45:67:89:0a',
exit_on_failure: bool = True,
exit_code: int = 12,
quiet: bool = False) -> Union[None, str]:
"""
Make IPv6 link local address by MAC address
:param mac_address: MAC address (default: '01:23:45:67:89:0a')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 12)
:param quiet: Quiet mode, if True no console output (default: False)
:return: IPv6 link local address string (example: 'fe80::1') or None in case of error
"""
try:
assert self.mac_address_validation(mac_address=mac_address,
exit_on_failure=exit_on_failure,
exit_code=exit_code,
quiet=quiet), \
'Failed to make IPv6 link local address from MAC address: ' + self.error_text(str(mac_address))
parts: List[str] = mac_address.split(':')
parts.insert(3, 'ff')
parts.insert(4, 'fe')
parts[0] = '%x' % (int(parts[0], 16) ^ 2)
ipv6_parts: List[str] = list()
ipv6_parts_clear: List[str] = list()
for index in range(0, len(parts), 2):
ipv6_parts.append(''.join(parts[index:index + 2]))
for ipv6_part in ipv6_parts:
if ipv6_part.startswith('0'):
ipv6_part = ipv6_part[1:]
if ipv6_part.startswith('0'):
ipv6_part = ipv6_part[1:]
if ipv6_part.startswith('0'):
ipv6_part = ipv6_part[1:]
if ipv6_part.startswith('0'):
ipv6_part = ':'
ipv6_parts_clear.append(ipv6_part)
return 'fe80::%s' % (':'.join(ipv6_parts_clear))
except AssertionError as Error:
error_text = Error.args[0]
if not quiet:
self.print_error(error_text)
if exit_on_failure:
exit(exit_code)
else:
return None
    def get_interface_netmask(self,
                              interface_name: str = 'eth0',
                              exit_on_failure: bool = True,
                              exit_code: int = 13,
                              quiet: bool = False) -> Union[None, str]:
        """
        Get network interface mask
        :param interface_name: Network interface name (default: 'eth0')
        :param exit_on_failure: Exit in case of error (default: True)
        :param exit_code: Set exit code integer (default: 13)
        :param quiet: Quiet mode, if True no console output (default: False)
        :return: Network interface mask string (example: '255.255.255.0') or None in case of error
        """
        # Serve the cached value when interface settings were gathered earlier
        if interface_name in self._network_interfaces_settings.keys():
            if self._network_interfaces_settings[interface_name]['ipv4-netmask'] is not None:
                return self._network_interfaces_settings[interface_name]['ipv4-netmask']
        try:
            if self.get_platform().startswith('Windows'):
                for adapter in self._windows_adapters:
                    for ip in adapter.ips:
                        if ip.nice_name == interface_name and ip.is_IPv4:
                            # Convert prefix length to dotted-quad mask:
                            # e.g. prefix 24 -> 0xffffff00 -> '255.255.255.0'
                            bits = 0xffffffff ^ (1 << 32 - ip.network_prefix) - 1
                            return str(inet_ntoa(pack('>I', bits)))
                return None
            else:
                # UNIX-like: netmask of the first IPv4 entry from netifaces
                return str(ifaddresses(interface_name)[AF_INET][0]['netmask'])
        except ValueError:
            pass
        except KeyError:
            pass
        if not quiet:
            self.print_error('Network interface: ', interface_name, ' does not have network mask!')
        if exit_on_failure:
            exit(exit_code)
        return None
    def get_interface_network(self,
                              interface_name: str = 'eth0',
                              exit_on_failure: bool = True,
                              exit_code: int = 14,
                              quiet: bool = False) -> Union[None, str]:
        """
        Get IPv4 network on interface
        :param interface_name: Network interface name (default: 'eth0')
        :param exit_on_failure: Exit in case of error (default: True)
        :param exit_code: Set exit code integer (default: 14)
        :param quiet: Quiet mode, if True no console output (default: False)
        :return: IPv4 network string (example: '192.168.1.0/24') or None in case of error
        """
        # Serve the cached value when interface settings were gathered earlier
        if interface_name in self._network_interfaces_settings.keys():
            if self._network_interfaces_settings[interface_name]['ipv4-network'] is not None:
                return self._network_interfaces_settings[interface_name]['ipv4-network']
        try:
            netmask = self.get_interface_netmask(interface_name=interface_name,
                                                 exit_on_failure=exit_on_failure,
                                                 exit_code=exit_code,
                                                 quiet=quiet)
            ip_address = self.get_interface_ip_address(interface_name=interface_name,
                                                       exit_on_failure=exit_on_failure,
                                                       exit_code=exit_code,
                                                       quiet=quiet)
            # Build '<network address>/<prefix length>' in CIDR notation (netaddr)
            ip = IPNetwork(ip_address + '/' + netmask)
            return str(ip[0]) + '/' + str(IPAddress(netmask).netmask_bits())
        except KeyError:
            pass
        except ValueError:
            pass
        except TypeError:
            # ip_address or netmask is None (string concatenation fails)
            pass
        if not quiet:
            self.print_error('Network interface: ', interface_name, ' does not have IPv4 address or network mask!')
        if exit_on_failure:
            exit(exit_code)
        return None
def get_ip_on_interface_by_index(self,
interface_name: str = 'eth0',
index: int = 1,
exit_on_failure: bool = True,
exit_code: int = 15,
quiet: bool = False) -> Union[None, str]:
"""
Get IPv4 address on network interface by index of address
:param interface_name: Network interface name (default: 'eth0')
:param index: Index of IPv4 address integer (default: 1)
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 15)
:param quiet: Quiet mode, if True no console output (default: False)
:return: IPv4 address string (example: '192.168.1.1') or None in case of error
"""
try:
network: IPNetwork = IPNetwork(self.get_interface_network(interface_name=interface_name,
exit_on_failure=exit_on_failure,
exit_code=exit_code,
quiet=quiet))
return str(network[index])
except KeyError:
pass
except ValueError:
pass
except TypeError:
pass
if not quiet:
self.print_error('Network interface: ', interface_name, ' does not have IPv4 address or network mask!')
if exit_on_failure:
exit(exit_code)
return None
def get_first_ip_on_interface(self,
interface_name: str = 'eth0',
exit_on_failure: bool = True,
exit_code: int = 16,
quiet: bool = False) -> Union[None, str]:
"""
Get first IPv4 address on network interface
:param interface_name: Network interface name (default: 'eth0')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 16)
:param quiet: Quiet mode, if True no console output (default: False)
:return: IPv4 address string (example: '192.168.1.1') or None in case of error
"""
if interface_name in self._network_interfaces_settings.keys():
if self._network_interfaces_settings[interface_name]['first-ipv4-address'] is not None:
return self._network_interfaces_settings[interface_name]['first-ipv4-address']
return self.get_ip_on_interface_by_index(interface_name=interface_name,
index=1,
exit_on_failure=exit_on_failure,
exit_code=exit_code,
quiet=quiet)
def get_second_ip_on_interface(self,
interface_name: str = 'eth0',
exit_on_failure: bool = True,
exit_code: int = 17,
quiet: bool = False) -> Union[None, str]:
"""
Get second IPv4 address on network interface
:param interface_name: Network interface name (default: 'eth0')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 17)
:param quiet: Quiet mode, if True no console output (default: False)
:return: IPv4 address string (example: '192.168.1.2') or None in case of error
"""
if interface_name in self._network_interfaces_settings.keys():
if self._network_interfaces_settings[interface_name]['second-ipv4-address'] is not None:
return self._network_interfaces_settings[interface_name]['second-ipv4-address']
return self.get_ip_on_interface_by_index(interface_name=interface_name,
index=2,
exit_on_failure=exit_on_failure,
exit_code=exit_code,
quiet=quiet)
def get_penultimate_ip_on_interface(self,
interface_name: str = 'eth0',
exit_on_failure: bool = True,
exit_code: int = 18,
quiet: bool = False) -> Union[None, str]:
"""
Get penultimate IPv4 address on network interface
:param interface_name: Network interface name (default: 'eth0')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 18)
:param quiet: Quiet mode, if True no console output (default: False)
:return: IPv4 address string (example: '192.168.1.253') or None in case of error
"""
if interface_name in self._network_interfaces_settings.keys():
if self._network_interfaces_settings[interface_name]['penultimate-ipv4-address'] is not None:
return self._network_interfaces_settings[interface_name]['penultimate-ipv4-address']
return self.get_ip_on_interface_by_index(interface_name=interface_name,
index=-3,
exit_on_failure=exit_on_failure,
exit_code=exit_code,
quiet=quiet)
def get_last_ip_on_interface(self,
interface_name: str = 'eth0',
exit_on_failure: bool = True,
exit_code: int = 19,
quiet: bool = False) -> Union[None, str]:
"""
Get last IPv4 address on network interface
:param interface_name: Network interface name (default: 'eth0')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 19)
:param quiet: Quiet mode, if True no console output (default: False)
:return: IPv4 address string (example: '192.168.1.254') or None in case of error
"""
if interface_name in self._network_interfaces_settings.keys():
if self._network_interfaces_settings[interface_name]['last-ipv4-address'] is not None:
return self._network_interfaces_settings[interface_name]['last-ipv4-address']
return self.get_ip_on_interface_by_index(interface_name=interface_name,
index=-2,
exit_on_failure=exit_on_failure,
exit_code=exit_code,
quiet=quiet)
def get_random_ip_on_interface(self,
interface_name: str = 'eth0',
exit_on_failure: bool = True,
exit_code: int = 20,
quiet: bool = False) -> Union[None, str]:
"""
Get random IPv4 address on network interface
:param interface_name: Network interface name (default: 'eth0')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 20)
:param quiet: Quiet mode, if True no console output (default: False)
:return: IPv4 address string (example: '192.168.1.123') or None in case of error
"""
try:
network = IPNetwork(self.get_interface_network(interface_name=interface_name,
exit_on_failure=exit_on_failure,
exit_code=exit_code,
quiet=quiet))
return str(network[randint(2, len(network) - 3)])
except KeyError:
pass
except ValueError:
pass
except TypeError:
pass
if not quiet:
self.print_error('Network interface: ', interface_name, ' does not have IPv4 address or network mask!')
if exit_on_failure:
exit(exit_code)
return None
def get_interface_broadcast(self,
interface_name: str = 'eth0',
exit_on_failure: bool = True,
exit_code: int = 21,
quiet: bool = False) -> Union[None, str]:
"""
Get IPv4 broadcast address on network interface
:param interface_name: Network interface name (default: 'eth0')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 21)
:param quiet: Quiet mode, if True no console output (default: False)
:return: IPv4 address string (example: '192.168.1.255') or None in case of error
"""
if interface_name in self._network_interfaces_settings.keys():
if self._network_interfaces_settings[interface_name]['ipv4-broadcast'] is not None:
return self._network_interfaces_settings[interface_name]['ipv4-broadcast']
return self.get_ip_on_interface_by_index(interface_name=interface_name,
index=-1,
exit_on_failure=exit_on_failure,
exit_code=exit_code,
quiet=quiet)
    def get_interface_gateway(self,
                              interface_name: str = 'eth0',
                              network_type: int = AF_INET,
                              exit_on_failure: bool = True,
                              exit_code: int = 22,
                              quiet: bool = False) -> Union[None, str]:
        """
        Get gateway address on network interface
        :param interface_name: Network interface name (default: 'eth0')
        :param network_type: Set network type AF_INET for IPv4 network or AF_INET6 for IPv6 (default: AF_INET)
        :param exit_on_failure: Exit in case of error (default: False)
        :param exit_code: Set exit code integer (default: 22)
        :param quiet: Quiet mode, if True no console output (default: False)
        :return: Address string (example: '192.168.1.254') or None in case of error
        """
        try:
            gateway_address = None
            if self.get_platform().startswith('Windows'):
                # Windows: parse the routing table printed by `route print`.
                # NOTE(review): only the AF_INET case is handled here, so an
                # AF_INET6 request on Windows falls through to the error path.
                routes: Dict[str, str] = dict()
                route_table: List[str] = sub.check_output('route print', shell=True).decode().splitlines()
                if network_type == AF_INET:
                    for output_string in route_table:
                        # Route rows look like: destination netmask gateway interface metric
                        if match(r'^ +[0-9.]{7,15} +[0-9.]{7,15} +[0-9.]{7,15} +[0-9.]{7,15} +\d{1,3}$', output_string):
                            address = output_string.split()
                            # Map: interface address -> gateway address
                            routes[address[3]] = address[2]
                    interface_address = self.get_interface_ip_address(interface_name=interface_name,
                                                                      exit_on_failure=exit_on_failure,
                                                                      exit_code=exit_code,
                                                                      quiet=quiet)
                    if interface_address in routes.keys():
                        return routes[interface_address]
            else:
                # Linux/MacOS: walk netifaces gateways() and pick the entry
                # whose interface matches the requested name.
                gws = gateways()
                for gw in gws:
                    gateway_interface = gws[gw][network_type]
                    gateway_ip, interface = gateway_interface[0], gateway_interface[1]
                    if interface == interface_name:
                        gateway_address = gateway_ip
                        break
        except KeyError:
            gateway_address = None
        except ValueError:
            gateway_address = None
        except IndexError:
            gateway_address = None
        if gateway_address is None:
            # Report the failure as an error when we are about to exit,
            # otherwise as a warning.
            if not quiet:
                if network_type == AF_INET:
                    if exit_on_failure:
                        self.print_error('Network interface: ', interface_name, ' does not have IPv4 gateway!')
                    else:
                        self.print_warning('Network interface: ', interface_name, ' does not have IPv4 gateway!')
                if network_type == AF_INET6:
                    if exit_on_failure:
                        self.print_error('Network interface: ', interface_name, ' does not have IPv6 gateway!')
                    else:
                        self.print_warning('Network interface: ', interface_name, ' does not have IPv6 gateway!')
            if exit_on_failure:
                exit(exit_code)
        return gateway_address
def get_interface_ipv4_gateway(self,
interface_name: str = 'eth0',
exit_on_failure: bool = True,
exit_code: int = 23,
quiet: bool = False) -> Union[None, str]:
"""
Get IPv4 gateway address on network interface
:param interface_name: Network interface name (default: 'eth0')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 23)
:param quiet: Quiet mode, if True no console output (default: False)
:return: IPv4 address string (example: '192.168.1.254') or None in case of error
"""
if interface_name in self._network_interfaces_settings.keys():
if self._network_interfaces_settings[interface_name]['ipv4-gateway'] is not None:
return self._network_interfaces_settings[interface_name]['ipv4-gateway']
return self.get_interface_gateway(interface_name=interface_name,
network_type=AF_INET,
exit_on_failure=exit_on_failure,
exit_code=exit_code,
quiet=quiet)
def get_interface_ipv6_gateway(self,
interface_name: str = 'eth0',
exit_on_failure: bool = True,
exit_code: int = 24,
quiet: bool = False) -> Union[None, str]:
"""
Get IPv6 gateway address on network interface
:param interface_name: Network interface name (default: 'eth0')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 24)
:param quiet: Quiet mode, if True no console output (default: False)
:return: IPv6 address string (example: 'fd00::1') or None in case of error
"""
if interface_name in self._network_interfaces_settings.keys():
if self._network_interfaces_settings[interface_name]['ipv6-gateway'] is not None:
return self._network_interfaces_settings[interface_name]['ipv6-gateway']
return self.get_interface_gateway(interface_name=interface_name,
network_type=AF_INET6,
exit_on_failure=exit_on_failure,
exit_code=exit_code,
quiet=quiet)
    def add_multicast_mac_address(self,
                                  interface_name: str = 'eth0',
                                  multicast_mac_address: str = '33:33:00:00:00:02',
                                  exit_on_failure: bool = True,
                                  exit_code: int = 24,
                                  quiet: bool = False) -> bool:
        """
        Add Multicast MAC address on network interface
        :param interface_name: Network interface name (default: 'eth0')
        :param multicast_mac_address: Multicast MAC address (example: '33:33:00:00:00:02')
        :param exit_on_failure: Exit in case of error (default: False)
        :param exit_code: Set exit code integer (default: 24)
        :param quiet: Quiet mode, if True no console output (default: False)
        :return: True if success or False if error
        """
        # Short-circuit when this address was already registered for the interface;
        # otherwise make sure the per-interface list exists.
        if interface_name in self._network_interfaces_multicast_macs.keys():
            if multicast_mac_address in self._network_interfaces_multicast_macs[interface_name]:
                return True
        else:
            self._network_interfaces_multicast_macs[interface_name]: List[str] = list()
        try:
            # region Windows
            # NOTE(review): the Windows branch is unimplemented; after `pass`
            # control falls out of the try block and the method implicitly
            # returns None (falsy) instead of True/False.
            if self.get_platform().startswith('Windows'):
                pass
            # endregion
            # region MacOS
            # MacOS: only tracked in the internal cache, no system call is made.
            elif self.get_platform().startswith('Darwin'):
                self._network_interfaces_multicast_macs[interface_name].append(multicast_mac_address)
                return True
            # endregion
            # region Linux
            elif self.get_platform().startswith('Linux'):
                # Check whether the kernel already has this multicast address.
                mcast_addresses = sub.run(['ip maddress show ' + interface_name],
                                          shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
                mcast_addresses = mcast_addresses.stdout.decode('utf-8')
                if multicast_mac_address in mcast_addresses:
                    self._network_interfaces_multicast_macs[interface_name].append(multicast_mac_address)
                else:
                    # `ip maddress add` prints nothing on success; any output
                    # is treated as a failure.
                    add_mcast_address = sub.run(['ip maddress add ' + multicast_mac_address + ' dev ' + interface_name],
                                                shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
                    add_mcast_address = add_mcast_address.stdout.decode('utf-8')
                    assert add_mcast_address == '', \
                        'Could not add milticast MAC address: ' + self.error_text(multicast_mac_address) + \
                        ' on interface: ' + self.error_text(interface_name)
                    self._network_interfaces_multicast_macs[interface_name].append(multicast_mac_address)
                    if not quiet:
                        self.print_info('Add milticast MAC address: ', multicast_mac_address,
                                        ' on interface: ', interface_name)
                return True
            # endregion
            else:
                assert False, 'Your platform: ' + self.error_text(self.get_platform()) + ' is not supported!'
        except AssertionError as Error:
            if not quiet:
                self.print_error(Error.args[0])
            if exit_on_failure:
                exit(exit_code)
            return False
# endregion
# region Check installed software
def apt_list_installed_packages(self,
exit_on_failure: bool = True,
exit_code: int = 25,
quiet: bool = False) -> Union[None, bytes]:
"""
Get output of bash command: apt list --installed
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 25)
:param quiet: Quiet mode, if True no console output (default: False)
:return: result bytes
"""
try:
apt_list_command = sub.Popen(['apt list --installed'], shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
apt_list_out, apt_list_err = apt_list_command.communicate()
assert apt_list_out is not None, \
'Something else went wrong while trying to run command: ' + \
self.error_text('`apt list --installed`')
self.os_installed_packages_list = apt_list_out
return apt_list_out
except OSError:
error_text = 'Something else went wrong while trying to run command: ' + \
self.error_text('`apt list --installed`')
except AssertionError as Error:
error_text = Error.args[0]
if not quiet:
self.print_error(error_text)
if exit_on_failure:
exit(exit_code)
return None
def check_installed_software(self,
software_name: str = 'apache2',
exit_on_failure: bool = True,
exit_code: int = 26,
quiet: bool = False) -> bool:
"""
Check software is installed or not
:param software_name: Name of software (default: 'apache2')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 26)
:param quiet: Quiet mode, if True no console output (default: False)
:return: True or False
"""
try:
assert self.check_platform(exit_on_failure=exit_on_failure,
exit_code=exit_code,
quiet=quiet), \
'This is not a Linux platform'
assert not ('Kali' in linux_distribution()
or 'Debian' in linux_distribution()
or 'Ubuntu' in linux_distribution()), \
'Unable to verify OS installed software. ' + \
'This function works normal only in Debian, Ubuntu or Kali linux.'
if self.os_installed_packages_list is None:
self.apt_list_installed_packages(exit_on_failure)
assert self.os_installed_packages_list is not None, 'Unable to verify OS installed software.'
if software_name.encode(encoding='utf-8') in self.os_installed_packages_list:
return True
else:
if isfile('/bin/' + software_name) or isfile('/sbin/' + software_name) or \
isfile('/usr/bin/' + software_name) or isfile('/usr/sbin/' + software_name) or \
isfile('/usr/local/bin/' + software_name) or isfile('/usr/local/sbin/' + software_name):
return True
else:
return False
except AssertionError as Error:
error_text = Error.args[0]
if 'Debian, Ubuntu or Kali linux' in error_text:
if not quiet:
self.print_warning(error_text)
if isfile('/bin/' + software_name) or isfile('/sbin/' + software_name) or \
isfile('/usr/bin/' + software_name) or isfile('/usr/sbin/' + software_name) or \
isfile('/usr/local/bin/' + software_name) or isfile('/usr/local/sbin/' + software_name):
return True
else:
return False
else:
if not quiet:
self.print_error(error_text)
if exit_on_failure:
exit(exit_code)
return False
# endregion
# region Process control functions
@staticmethod
def check_process(process_name: str = 'systemd') -> int:
"""
Check process is running
:param process_name: Process name string (default: 'systemd')
:return: Process ID integer (example: 1)
"""
for process in ps.process_iter():
if 'python' in process.name():
for argument in process.cmdline():
if process_name in argument:
return int(process.pid)
if process.name() == process_name:
return int(process.pid)
return -1
    def get_process_pid(self, process_name: str = 'systemd') -> int:
        """
        Get process ID by process name
        :param process_name: Process name string (default: 'systemd')
        :return: Process ID integer (example: 1234) or -1 if the process is not running
        """
        # Thin wrapper around check_process for a friendlier name.
        return self.check_process(process_name)
def get_process_pid_by_listen_port(self,
listen_port: int = 80,
listen_address: Union[None, str] = None,
listen_proto: Union[None, str] = None,
exit_on_failure: bool = True,
exit_code: int = 27,
quiet: bool = False) -> Union[None, List[int]]:
"""
Get list of processes ID by listen TCP or UDP port
:param listen_port: Listening TCP or UDP port integer (default: 80)
:param listen_address: Listening IPv4 or IPv6 address string (default: None)
:param listen_proto: Listening protocol string 'tcp' or 'udp' (default: 'tcp')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 27)
:param quiet: Quiet mode, if True no console output (default: False)
:return: List of processes ID by listen TCP or UDP port
"""
pids: List[int] = list()
try:
assert 1 < listen_port < 65535, \
'Bad listen port: ' + self.error_text(str(listen_port)) + \
' listen port must be in range: ' + self.info_text('1 - 65535')
assert (listen_proto is None or listen_proto == 'tcp' or listen_proto == 'udp'), \
'Bad value in listen proto: ' + self.error_text(str(listen_proto)) + \
' listen proto must be ' + self.info_text('None' + ' or ' + '\'tcp\'' + ' or ' + '\'udp\'')
if listen_proto is None:
listen_proto = 'tcp'
for process in ps.process_iter():
connections = process.connections()
for connection in connections:
(address, port) = connection.laddr
if connection.type == sock.SOCK_STREAM and connection.status == ps.CONN_LISTEN:
proto = 'tcp'
elif connection.type == sock.SOCK_DGRAM:
proto = 'udp'
else:
continue
if listen_address is not None:
if address == listen_address and proto == listen_proto \
and port == listen_port and process.pid is not None:
pids.append(process.pid)
else:
if proto == listen_proto and port == listen_port and process.pid is not None:
pids.append(process.pid)
return pids
except ps.NoSuchProcess:
return pids
except AssertionError as Error:
if not quiet:
self.print_error(Error.args[0])
if exit_on_failure:
exit(exit_code)
return None
def kill_process(self, process_pid: int) -> bool:
"""
Kill process by ID
:param process_pid: Process ID integer
:return: True if kill process or False if not
"""
try:
if self.get_platform().startswith('Windows'):
sub.check_output('taskkill /F /PID ' + str(process_pid), shell=True)
else:
process = ps.Process(process_pid)
process.terminate()
return True
except ps.NoSuchProcess:
return False
def kill_process_by_name(self, process_name: str = 'apache2') -> bool:
"""
Kill process by name
:param process_name: Process name string (default: apache2)
:return: True if kill process or False if not
"""
if self.get_platform().startswith('Windows'):
sub.check_output('taskkill /F /IM ' + process_name, shell=True)
return True
else:
process_pid = self.get_process_pid(process_name)
if process_pid != -1:
while (self.get_process_pid(process_name) != -1):
self.kill_process(process_pid)
return True
else:
return False
def kill_processes_by_listen_port(self,
listen_port: int = 80,
listen_address: Union[None, str] = None,
listen_proto: str = 'tcp') -> bool:
"""
Kill processes by listen TCP or UDP port
:param listen_port: Listening TCP or UDP port integer (default: 80)
:param listen_address: Listening IPv4 or IPv6 address string (default: None)
:param listen_proto: Listening protocol string 'tcp' or 'udp' (default: 'tcp')
:return: True if kill all processes or False if not
"""
# Get pids all process and kill
pid_list: List[int] = self.get_process_pid_by_listen_port(listen_port, listen_address, listen_proto)
if len(pid_list) > 0:
for pid in pid_list:
if not self.kill_process(pid):
return False
return True
else:
return False
# endregion
# region Others functions
def ipv6_address_validation(self,
ipv6_address: str = 'fd00::1',
exit_on_failure: bool = False,
exit_code: int = 28,
quiet: bool = True) -> bool:
"""
Validate IPv6 address string
:param ipv6_address: IPv6 address string (example: 'fd00::1')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 28)
:param quiet: Quiet mode, if True no console output (default: False)
:return: True if a valid IPv6 address or False if not
"""
try:
sock.inet_pton(sock.AF_INET6, ipv6_address)
return True
except sock.error:
if not quiet:
self.print_error('Failed to validate IPv6 address: ', str(ipv6_address))
if exit_on_failure:
exit(exit_code)
return False
def ip_address_validation(self,
ip_address: str = '192.168.1.1',
exit_on_failure: bool = False,
exit_code: int = 29,
quiet: bool = True) -> bool:
"""
Validate IPv4 address string
:param ip_address: IPv4 address string (example: '192.168.1.1')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 29)
:param quiet: Quiet mode, if True no console output (default: False)
:return: True if a valid IPv4 address or False if not
"""
try:
sock.inet_aton(ip_address)
return True
except sock.error:
if not quiet:
self.print_error('Failed to validate IP address: ', str(ip_address))
if exit_on_failure:
exit(exit_code)
return False
def mac_address_validation(self,
mac_address: str = '01:23:45:67:89:0a',
exit_on_failure: bool = False,
exit_code: int = 30,
quiet: bool = False) -> bool:
"""
Validate MAC address string
:param mac_address: MAC address string (example: '01:23:45:67:89:0a')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 10)
:param quiet: Quiet mode, if True no console output (default: False)
:return: True if a valid MAC address or False if not
"""
if match(r'^([0-9a-fA-F]{2}[:]){5}([0-9a-fA-F]{2})$', mac_address):
return True
else:
if not quiet:
self.print_error('Failed to validate MAC address: ', str(mac_address))
if exit_on_failure:
exit(exit_code)
return False
def mac_address_normalization(self,
mac_address: str = 'AB:CD:EF:AB:CD:EF',
exit_on_failure: bool = False,
exit_code: int = 30,
quiet: bool = False) -> Union[None, str]:
"""
Validate MAC address string
:param mac_address: MAC address string (example: 'AB:CD:EF:AB:CD:EF')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 10)
:param quiet: Quiet mode, if True no console output (default: False)
:return: MAC address (example: 'ab:cd:ef:ab:cd:ef') or None if not
"""
try:
assert self.mac_address_validation(mac_address=mac_address), 'Bad MAC address'
return mac_address.lower()
except AssertionError:
if not quiet:
self.print_error('Failed to normalize MAC address: ', str(mac_address))
if exit_on_failure:
exit(exit_code)
return None
def ip_address_in_range(self,
ip_address: str = '192.168.1.2',
first_ip_address: str = '192.168.1.1',
last_ip_address: str = '192.168.1.3',
exit_on_failure: bool = False,
exit_code: int = 31,
quiet: bool = False) -> bool:
"""
Check IPv4 address in range
:param ip_address: IPv4 address string (example: '192.168.1.2')
:param first_ip_address: First IPv4 address in range (example: '192.168.1.1')
:param last_ip_address: Last IPv4 address in range (example: '192.168.1.3')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 31)
:param quiet: Quiet mode, if True no console output (default: False)
:return: True if IPv4 address in range or False if not
"""
try:
assert (IPv4Address(first_ip_address) <= IPv4Address(ip_address) <= IPv4Address(last_ip_address)), \
'IP address: ' + self.error_text(str(ip_address)) + \
' not in range: ' + self.error_text(str(first_ip_address) + ' - ' + str(last_ip_address))
return True
except AddressValueError:
error_text = 'Bad IPv4 address: ' + self.error_text(str(ip_address))
except AssertionError as Error:
error_text = Error.args[0]
if not quiet:
self.print_error(error_text)
if exit_on_failure:
exit(exit_code)
return False
def ip_address_in_network(self,
ip_address: str = '192.168.1.1',
network: str = '192.168.1.0/24',
exit_on_failure: bool = False,
exit_code: int = 32,
quiet: bool = True) -> bool:
"""
Check IPv4 address in network
:param ip_address: IPv4 address string (example: '192.168.1.1')
:param network: IPv4 network string (example: '192.168.1.0/24')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 32)
:param quiet: Quiet mode, if True no console output (default: False)
:return: True if IPv4 address in network or False if not
"""
try:
assert IPAddress(ip_address) in IPNetwork(network), \
'IPv4 address: ' + self.error_text(str(ip_address)) + \
' not in IPv4 network: ' + self.error_text(str(network))
return True
except AddressValueError:
error_text = 'Bad IPv4 address: ' + self.error_text(str(ip_address))
except AddrFormatError:
error_text = 'Bad IPv4 network: ' + self.error_text(str(network)) + \
' or IPv4 address: ' + self.error_text(str(ip_address)) + \
' not in IPv4 network: ' + self.error_text(str(network))
except AssertionError as Error:
error_text = Error.args[0]
if not quiet:
self.print_error(error_text)
if exit_on_failure:
exit(exit_code)
return False
def ip_address_increment(self,
ip_address: str = '192.168.1.1',
exit_on_failure: bool = False,
exit_code: int = 33,
quiet: bool = False) -> Union[None, str]:
"""
Increment IPv4 address
:param ip_address: IPv4 address string (example: '192.168.1.1')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 33)
:param quiet: Quiet mode, if True no console output (default: False)
:return: IPv4 address string (example: '192.168.1.2')
"""
try:
return str(IPv4Address(ip_address) + 1)
except AddressValueError:
if quiet:
self.print_error('Bad IPv4 address: ', str(ip_address))
if exit_on_failure:
exit(exit_code)
return None
def ip_address_decrement(self,
ip_address: str = '192.168.1.2',
exit_on_failure: bool = False,
exit_code: int = 34,
quiet: bool = False) -> Union[None, str]:
"""
Decrement IPv4 address
:param ip_address: IPv4 address string (example: '192.168.1.2')
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 33)
:param quiet: Quiet mode, if True no console output (default: False)
:return: IPv4 address string (example: '192.168.1.1')
"""
try:
return str(IPv4Address(ip_address) - 1)
except AddressValueError:
if quiet:
self.print_error('Bad IPv4 address: ', str(ip_address))
if exit_on_failure:
exit(exit_code)
return None
def ip_address_compare(self,
first_ip_address: str = '192.168.1.1',
second_ip_address: str = '192.168.1.1',
operator: str = 'eq',
exit_on_failure: bool = False,
exit_code: int = 35,
quiet: bool = False) -> bool:
"""
Compare IPv4 addresses
:param first_ip_address: First IPv4 address for compare (example: 192.168.0.1)
:param second_ip_address: Second IPv4 address for compare (example: 192.168.0.2)
:param operator: eq - equal; ne - not equal; gt - greater; ge - greater or equal; lt - less; le - less or equal (default: eq)
:param exit_on_failure: Exit in case of error (default: False)
:param exit_code: Set exit code integer (default: 33)
:param quiet: Quiet mode, if True no console output (default: False)
:return: True or False
"""
try:
assert (operator == 'eq' or operator == 'ne' or operator == 'gt' or operator == 'ge'
or operator == 'lt' or operator == 'le'), \
'Bad operator: ' + self.error_text(str(operator)) + \
' acceptable operator values: ' + self.info_text('eq - equal; ne - not equal; ' +
'gt - greater; ge - greater or equal; ' +
'lt - less; le - less or equal')
if operator == 'eq':
if IPv4Address(first_ip_address) == IPv4Address(second_ip_address):
return True
else:
return False
elif operator == 'ne':
if IPv4Address(first_ip_address) != IPv4Address(second_ip_address):
return True
else:
return False
elif operator == 'gt':
if IPv4Address(first_ip_address) > IPv4Address(second_ip_address):
return True
else:
return False
elif operator == 'ge':
if IPv4Address(first_ip_address) >= IPv4Address(second_ip_address):
return True
else:
return False
elif operator == 'lt':
if IPv4Address(first_ip_address) < IPv4Address(second_ip_address):
return True
else:
return False
elif operator == 'le':
if IPv4Address(first_ip_address) <= IPv4Address(second_ip_address):
return True
else:
return False
except AssertionError as Error:
error_text = Error.args[0]
except AddressValueError:
error_text = 'Bad ip address in input parameters'
if not quiet:
self.print_error(error_text)
if exit_on_failure:
exit(exit_code)
return False
def make_random_string(self, length: int = 8) -> str:
"""
Make random string from lowercase letter, uppercase letter and digits
:param length: Length of string (default: 8)
:return: Random string (example: d1dfJ3a032)
"""
return ''.join(choice(self.lowercase_letters + self.uppercase_letters + self.digits) for _ in range(length))
@staticmethod
def get_system_name_servers() -> List[str]:
name_servers_ip_addresses: List[str] = list()
resolve_conf_filename: str = '/etc/resolv.conf'
try:
assert isfile(resolve_conf_filename), \
'Not found ' + resolve_conf_filename + ' file!'
with open(resolve_conf_filename, 'r') as resolve_conf:
for settings_line in resolve_conf.read().splitlines():
settings_columns = settings_line.split()
if settings_columns[0] == 'nameserver':
name_servers_ip_addresses.append(settings_columns[1])
except AssertionError:
pass
return name_servers_ip_addresses
def get_vendor_by_mac_address(self, mac_address: str = '01:23:45:67:89:0a') -> str:
"""
Get vendor of host by MAC address
:param mac_address: MAC address of host (example: '01:23:45:67:89:0a')
:return: Vendor string
"""
if not self.mac_address_validation(mac_address):
return 'Unknown vendor'
mac_address: str = mac_address.upper()
for vendor_mac_prefix in self.vendors.keys():
if mac_address.startswith(vendor_mac_prefix):
return self.vendors[vendor_mac_prefix]
return 'Unknown vendor'
def macos_encode_mac_address(self, mac_address: str = '01:23:45:67:89:0a') -> str:
"""
Convert MAC address to MacOS format
:param mac_address: MAC address string (example: 01:23:45:67:89:0a)
:return: Converted MAC address string (example: 1:23:45:67:89:a)
"""
if self.mac_address_validation(mac_address):
address_in_macos_arp_table: str = ''
for part_of_address in mac_address.split(':'):
if part_of_address[0] == '0':
address_in_macos_arp_table += part_of_address[1] + ':'
else:
address_in_macos_arp_table += part_of_address + ':'
return address_in_macos_arp_table[:-1]
else:
return mac_address
    def exec_command_over_ssh(self,
                              command: str = 'ifconfig',
                              ssh_user: str = 'root',
                              ssh_password: Union[None, str] = None,
                              ssh_pkey: Union[None, RSAKey] = None,
                              ssh_host: str = '192.168.0.1',
                              need_output: bool = True,
                              exit_on_failure: bool = True) -> Union[None, bool, str]:
        """
        Exec cmd command over SSH
        :param command: CMD command string (example: 'ifconfig')
        :param ssh_user: SSH user string (example: 'root')
        :param ssh_password: SSH password string or None if use ssh private key
        :param ssh_pkey: SSH private key or None if use ssh password
        :param ssh_host: SSH host string (example: '192.168.0.1')
        :param need_output: Need command output or not (default: True)
        :param exit_on_failure: Exit in case of error (default: False)
        :return: True or False if not need output, Output string or None if need output
        """
        command_result: Union[None, str] = None
        try:
            # At least one credential (password or private key) is required.
            assert not (ssh_password is None and ssh_pkey is None), \
                'SSH password and private key is None'
            ssh_client: SSHClient = SSHClient()
            # Auto-accept unknown host keys; convenient, but disables
            # man-in-the-middle protection.
            ssh_client.set_missing_host_key_policy(AutoAddPolicy())
            if ssh_password is not None:
                ssh_client.connect(hostname=ssh_host, username=ssh_user, password=ssh_password)
            if ssh_pkey is not None:
                # NOTE(review): when both a password and a key are supplied,
                # this second connect replaces the password-based session.
                ssh_client.connect(hostname=ssh_host, username=ssh_user, pkey=ssh_pkey)
            if need_output:
                stdin, stdout, stderr = ssh_client.exec_command(command)
                # Merge stdout and stderr into one result string.
                command_result = stdout.read().decode('utf-8') + stderr.read().decode('utf-8')
                ssh_client.close()
                return command_result
            else:
                # Fire and forget: the command's output is discarded.
                ssh_client.exec_command(command)
                ssh_client.close()
                return True
        except AssertionError as Error:
            self.print_error(Error.args[0])
        except NoValidConnectionsError:
            self.print_error('Could not connect to SSH host: ', ssh_host)
        except AuthenticationException:
            self.print_error('SSH authentication error: ', ssh_user + '@' + ssh_host)
        except SSHException as Error:
            self.print_error('SSH Exception: ', Error.args[0])
        if exit_on_failure:
            exit(1)
        # Error path: None when output was requested, False otherwise.
        if need_output:
            return command_result
        else:
            return False
    def download_file_over_ssh(self,
                               remote_path: str = '/tmp/test.txt',
                               local_path: str = 'test.txt',
                               ssh_user: str = 'root',
                               ssh_password: Union[None, str] = None,
                               ssh_pkey: Union[None, RSAKey] = None,
                               ssh_host: str = '192.168.0.1',
                               exit_on_failure: bool = True) -> bool:
        """
        Download a file from a remote host over SSH (SFTP).

        Exactly one of ssh_password / ssh_pkey must be provided; if both are
        None an error is reported (and the process may exit, see below).

        :param remote_path: Remote file path string
        :param local_path: Local file path string
        :param ssh_user: SSH user string (example: 'root')
        :param ssh_password: SSH password string or None if use ssh private key
        :param ssh_pkey: SSH private key (paramiko RSAKey) or None if use ssh password
        :param ssh_host: SSH host string (example: '192.168.0.1')
        :param exit_on_failure: Exit the whole process via exit(1) on error
            (default: True)
        :return: True if the file was downloaded, False on failure
        """
        try:
            # Refuse to proceed with no credentials at all.
            assert not (ssh_password is None and ssh_pkey is None), \
                'SSH password and private key is None'
            ssh_client: SSHClient = SSHClient()
            # AutoAddPolicy silently trusts unknown host keys (no known_hosts
            # verification) -- acceptable here for lab/router devices.
            ssh_client.set_missing_host_key_policy(AutoAddPolicy())
            if ssh_password is not None:
                ssh_client.connect(hostname=ssh_host, username=ssh_user, password=ssh_password)
            if ssh_pkey is not None:
                ssh_client.connect(hostname=ssh_host, username=ssh_user, pkey=ssh_pkey)
            sftp = ssh_client.open_sftp()
            sftp.get(remote_path, local_path)
            sftp.close()
            ssh_client.close()
            return True
        except AssertionError as Error:
            self.print_error(Error.args[0])
        except NoValidConnectionsError:
            self.print_error('Could not connect to SSH host: ', ssh_host)
        except AuthenticationException:
            self.print_error('SSH authentication error: ', ssh_user + '@' + ssh_host)
        except SSHException as Error:
            self.print_error('SSH Exception: ', Error.args[0])
        except FileNotFoundError:
            # Raised by sftp.get() when the remote path does not exist.
            self.print_error('Not found remote file: ', remote_path)
        # Only reached on failure: optionally terminate the whole process.
        if exit_on_failure:
            exit(1)
        return False
# endregion
# endregion
| 46.995146 | 133 | 0.533735 |
edf957586616a5aab0443eef46fb7fd92098ecfa | 1,274 | py | Python | apps/horizon-health-check/main.py | yonikashi/blocktest | db044d74afc62f80f8f74060830347e82dd03adb | [
"MIT"
] | 1 | 2018-12-30T10:44:48.000Z | 2018-12-30T10:44:48.000Z | apps/horizon-health-check/main.py | yonikashi/blocktest | db044d74afc62f80f8f74060830347e82dd03adb | [
"MIT"
] | 2 | 2021-04-20T17:22:32.000Z | 2021-09-23T23:35:46.000Z | apps/horizon-health-check/main.py | yonikashi/blocktest | db044d74afc62f80f8f74060830347e82dd03adb | [
"MIT"
] | null | null | null | """
Health check for Horizon.
Healthy: Corresponding stellar-core is 'Synced'
Unhealthy: Corresponding stellar-core is not 'Synced'
"""
import json
import time
import os
import requests
from flask import Flask
from flask_cors import CORS
# Flask application serving the /status health-check endpoint.
APP = Flask(__name__)
# Allow cross-origin requests so dashboards on other hosts can poll /status.
CORS(APP)
# Recorded once at process start; echoed in every /status reply.
START_TIMESTAMP = time.time()
# Load configuration from env variables
CORE_INFO_URL = os.environ['CORE_INFO_URL']  # stellar-core /info endpoint URL
BUILD_VERSION = os.environ['BUILD_VERSION']  # reported in /status replies
REQUEST_TIMEOUT = float(os.environ['REQUEST_TIMEOUT'])  # seconds, per core request
def make_reply(msg, code):
    """Build the (JSON body, HTTP status code) pair returned by /status."""
    if code == 200:
        health = 'Healthy'
    else:
        health = 'Unhealthy'
    payload = {
        'status': health,
        'description': msg,
        'start_timestamp': START_TIMESTAMP,
        'build': BUILD_VERSION,
    }
    return json.dumps(payload), code
@APP.route("/status")
def status():
    """Report Healthy iff the backing stellar-core says it is synced."""
    try:
        res = requests.get(CORE_INFO_URL, timeout=REQUEST_TIMEOUT)
        res.raise_for_status()
        synced = res.json()['info']['state'] == 'Synced!'
        if not synced:
            return make_reply('Core is not synced', 503)
        return make_reply('Core is synced', 200)
    except Exception as e:
        # Any failure (network, bad status, malformed JSON) -> unhealthy.
        return make_reply('Could not check core: {}'.format(str(e)), 503)
4259739a972f19b4e235cdb6a7d24ee27044c362 | 7,024 | py | Python | exp/utils_visualise.py | zzzace2000/FIDO-saliency | fe0cc51cfe3299405e032c2e10b6f4ad13545822 | [
"CC-BY-4.0"
] | 29 | 2018-11-29T22:06:17.000Z | 2022-02-12T21:15:38.000Z | exp/utils_visualise.py | zzzace2000/FIDO-saliency | fe0cc51cfe3299405e032c2e10b6f4ad13545822 | [
"CC-BY-4.0"
] | null | null | null | exp/utils_visualise.py | zzzace2000/FIDO-saliency | fe0cc51cfe3299405e032c2e10b6f4ad13545822 | [
"CC-BY-4.0"
] | 1 | 2020-10-08T07:01:17.000Z | 2020-10-08T07:01:17.000Z | # -*- coding: utf-8 -*-
"""
Some utility functions for visualisation, not documented properly
"""
from skimage import color
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pylab
from torchvision.utils import make_grid
import torch
import matplotlib.patches as patches
def plot_results(x_test, x_test_im, sensMap, predDiff, tarFunc, classnames, testIdx, save_path):
    '''
    Plot the results of the relevance estimation as a 2x2 panel figure and
    save it to save_path.

    Panels: (1) original image, (2) sensitivity map, (3) weight-of-evidence
    heat map for the predicted class, (4) heat map overlayed on the image.

    :param x_test: input sample; shape is read for reshaping predDiff
        (assumes spatial dims at indices 1 and 2 -- TODO confirm)
    :param x_test_im: displayable version of the input image
    :param sensMap: sensitivity map, plotted in grayscale
    :param predDiff: flattened per-class relevance values
    :param tarFunc: callable returning class scores for x_test; the argmax of
        its last output picks the target class
    :param classnames: sequence mapping class index -> class name
    :param testIdx: unused in this function (kept for interface compatibility)
    :param save_path: file path the figure is written to
    '''
    imsize = x_test.shape
    # Target class = argmax of the model's prediction for this sample.
    tarIdx = np.argmax(tarFunc(x_test)[-1])
    tarClass = classnames[tarIdx]
    #tarIdx = 287
    plt.figure()
    plt.subplot(2,2,1)
    plt.imshow(x_test_im, interpolation='nearest')
    plt.title('original')
    frame = pylab.gca()
    frame.axes.get_xaxis().set_ticks([])
    frame.axes.get_yaxis().set_ticks([])
    plt.subplot(2,2,2)
    plt.imshow(sensMap, cmap=cm.Greys_r, interpolation='nearest')
    plt.title('sensitivity map')
    frame = pylab.gca()
    frame.axes.get_xaxis().set_ticks([])
    frame.axes.get_yaxis().set_ticks([])
    plt.subplot(2,2,3)
    # Pick the relevance slice for the target class; symmetric color scale
    # around zero so positive/negative evidence are visually comparable.
    p = predDiff.reshape((imsize[1],imsize[2],-1))[:,:,tarIdx]
    plt.imshow(p, cmap=cm.seismic, vmin=-np.max(np.abs(p)), vmax=np.max(np.abs(p)), interpolation='nearest')
    plt.colorbar()
    #plt.imshow(np.abs(p), cmap=cm.Greys_r)
    plt.title('weight of evidence')
    frame = pylab.gca()
    frame.axes.get_xaxis().set_ticks([])
    frame.axes.get_yaxis().set_ticks([])
    plt.subplot(2,2,4)
    plt.title('class: {}'.format(tarClass))
    # NOTE(review): get_overlayed_image in this module returns a
    # (tensor, clim) tuple, which is then handed to plt.imshow -- this looks
    # like it expects the image alone; confirm against the intended helper.
    p = get_overlayed_image(x_test_im, p)
    #p = predDiff[0,:,:,np.argmax(netPred(net, x_test)[0]),1].reshape((224,224))
    plt.imshow(p, cmap=cm.seismic, vmin=-np.max(np.abs(p)), vmax=np.max(np.abs(p)), interpolation='nearest')
    #plt.title('class entropy')
    frame = pylab.gca()
    frame.axes.get_xaxis().set_ticks([])
    frame.axes.get_yaxis().set_ticks([])
    fig = plt.gcf()
    fig.set_size_inches(np.array([12,12]), forward=True)
    # Repeated tight_layout calls: presumably to force layout convergence
    # after resizing -- TODO confirm whether one call suffices.
    plt.tight_layout()
    plt.tight_layout()
    plt.tight_layout()
    plt.savefig(save_path)
    plt.close()
def pytorch_to_np(pytorch_image):
    """Convert a [C, H, W] float tensor in [0, 1] to a uint8 [H, W, C] numpy image."""
    scaled = pytorch_image.mul(255).clamp(0, 255)
    channels_last = scaled.byte().permute(1, 2, 0)
    return channels_last.numpy()
def plot_pytorch_img(pytorch_img, ax, cmap=None, **kwargs):
    """Render a [C, H, W] tensor on *ax*; returns the AxesImage from imshow."""
    image_array = pytorch_to_np(pytorch_img)
    return ax.imshow(image_array, cmap=cmap, interpolation='nearest', **kwargs)
def plot_rectangle(coord, ax, color='red'):
    """Draw an unfilled rectangle on *ax* from a 2x2 corner-coordinate array."""
    from matplotlib import patches
    origin = coord[0, :]
    width = coord[1, 0] - coord[0, 0]
    height = coord[1, 1] - coord[0, 1]
    # fill=False keeps the underlying image visible; only the outline is drawn.
    rectangle = patches.Rectangle(origin, width, height, color=color, fill=False)
    ax.add_patch(rectangle)
def _preprocess_img_to_pytorch(img):
if type(img) == np.ndarray:
img = torch.FloatTensor(img)
if img.ndimension() != 3:
raise Exception('The input dimension of image is not 3 but %d' % img.ndimension())
if img.shape[0] == 1:
img = img.expand(3, img.shape[1], img.shape[2])
return img
def plot_orig_and_overlay_img(orig_img, overlayed_img, file_name, bbox_coord=None, gt_class_name='',
                              pred_class_name='', cmap=cm.seismic, clim=None, visualize=False):
    '''
    Plot the original image (with bounding box) next to the overlayed image,
    then either show the figure or save it to file_name.

    :param orig_img: PyTorch 3d array [channel, width, height]
    :param overlayed_img: PyTorch 3d array [channel, width, height]
    :param file_name: output path used when visualize is False
    :param bbox_coord: 2x2 array of bounding-box corners.
        NOTE(review): despite the None default, it is indexed
        unconditionally below, so passing None raises -- confirm callers
        always supply it.
    :param gt_class_name: ground-truth class label shown in the title
    :param pred_class_name: predicted class label; shown on a second title
        line only when it differs from the ground truth
    :param cmap: colormap for the original image panel
    :param clim: optional (vmin, vmax) colour limits for the overlay panel
    :param visualize: if True, show the figure interactively; if False
        (the default), save it to file_name instead
    :return: None
    '''
    orig_img = _preprocess_img_to_pytorch(orig_img)
    overlayed_img = _preprocess_img_to_pytorch(overlayed_img)
    if type(overlayed_img) == np.ndarray:
        overlayed_img = torch.from_numpy(overlayed_img)
    # Close any previous figure to avoid leaking matplotlib state.
    plt.close()
    fig = plt.figure()
    # Plot original image
    ax1 = fig.add_subplot(121)
    im1 = plot_pytorch_img(orig_img, ax1, cmap)
    fig.colorbar(im1, ax=ax1)
    ## Plot the bounding box
    ax1.add_patch(
        patches.Rectangle(
            bbox_coord[0, :], bbox_coord[1, 0] - bbox_coord[0, 0], bbox_coord[1, 1] - bbox_coord[0, 1],
            color='red', fill=False  # remove background
        )
    )
    # Plot the overlayed image
    ax2 = fig.add_subplot(122)
    if clim is not None:
        # Explicit colour limits keep the overlay scale comparable across images.
        im2 = plot_pytorch_img(overlayed_img, ax2, cmap=cm.seismic, vmin=clim[0], vmax=clim[1])
        fig.colorbar(im2, ax=ax2, cmap=cm.seismic, fraction=0.046, pad=0.04)
    else:
        im2 = plot_pytorch_img(overlayed_img, ax2, cmap=cm.seismic)
    title = gt_class_name
    if gt_class_name != pred_class_name:
        # Show both labels when the prediction disagrees with ground truth.
        title = '%s\n%s' % (gt_class_name, pred_class_name)
    plt.title(title)
    ax1.axis("off")
    ax2.axis("off")
    plt.subplots_adjust(left=0.075, bottom=0.2, right=0.9)
    if visualize:
        plt.show()
    else:
        plt.savefig(file_name, dpi=300)
        plt.close()
def get_overlayed_image(orig_img, color_vec, cmap=cm.seismic):
    '''
    Overlay a relevance/colour vector on a torch image.

    :param orig_img: torch image tensor (moved to CPU before blending)
    :param color_vec: relevance values mapped through *cmap*
    :return: tuple (overlayed torch tensor, colour limits used by the colormap)
    '''
    as_numpy = orig_img.cpu().numpy()
    blended, clim = overlay(as_numpy, color_vec, cmap=cmap)
    return torch.from_numpy(blended), clim
def overlay(x, c, gray_factor_bg = 0.3, alpha=0.8, cmap=cm.seismic):
    '''
    For an image x and a relevance vector c, overlay the image with the
    relevance vector to visualise the influence of the image pixels.

    The image is first converted to a washed-out grayscale background, the
    relevance values are mapped through *cmap*, and the colour is blended in
    via the HSV colour space (hue/saturation from the relevance colours,
    value from the grayscale image).

    NOTE(review): this calls plt.imshow purely to obtain the colormap
    mapping, which creates an image on the current pyplot figure as a side
    effect -- confirm callers tolerate that.

    :param x: 2d image (e.g. MNIST) or 3d [channel, height, width] image
    :param c: relevance values; 1d (reshaped to square using x's first dim)
        or 2d
    :param gray_factor_bg: how strongly the 2d background is grayed out
    :param alpha: saturation multiplier for the overlayed colour
    :param cmap: matplotlib colormap for the relevance values
    :return: tuple (overlayed image transposed to [channel, height, width],
        colour limits used by the colormap)
    '''
    assert np.ndim(c) <= 2, 'dimension of c is:' + str(np.ndim(c))
    imDim = x.shape[0]
    if np.ndim(c) == 1:
        # Assumes the image is square so c can be folded to (imDim, imDim).
        c = c.reshape((imDim, imDim))
    # this happens with the MNIST Data
    if np.ndim(x) == 2:
        x = 1 - np.dstack((x, x, x)) * gray_factor_bg  # make it a bit grayish
    elif np.ndim(x) == 3:  # this is what happens with cifar data
        x = np.transpose(x, (1, 2, 0))
        x = color.rgb2gray(x)
        x = 1-(1-x)*0.3
        x = np.dstack((x, x, x))
    # Construct a colour image to superimpose
    # Symmetric limits around zero so positive/negative relevance get
    # balanced colours from the diverging colormap.
    vlimit = abs(c.min()) if abs(c.min()) > abs(c.max()) else abs(c.max())
    im = plt.imshow(c, cmap=cmap, interpolation='nearest', vmin=-vlimit, vmax=vlimit)
    color_mask = im.to_rgba(c)[:,:,[0,1,2]]
    clim = im.properties()['clim']
    # Convert the input image and color mask to Hue Saturation Value (HSV) colorspace
    img_hsv = color.rgb2hsv(x)
    color_mask_hsv = color.rgb2hsv(color_mask)
    # Replace the hue and saturation of the original image
    # with that of the color mask
    img_hsv[..., 0] = color_mask_hsv[..., 0]
    img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha
    img_masked = color.hsv2rgb(img_hsv)
    # Back to channel-first layout for the torch-oriented callers.
    img_masked = np.transpose(img_masked, (2, 0, 1))
    return img_masked, clim
# Visualize a grid of images and save it to disk.
def save_figs(imgs_list, filename='', nrow=1, dpi=300, visualize=False, ax=None, clim=None):
    """Arrange *imgs_list* into a torchvision grid, plot it, and save or return.

    :param imgs_list: list of image tensors accepted by torchvision make_grid
    :param filename: output path used when visualize is False
    :param nrow: images per row in the grid
    :param dpi: resolution of the saved figure
    :param visualize: if True, skip saving (caller shows the figure)
    :param ax: existing axes to draw on; a new figure is created when None
    :param clim: forwarded through plot_pytorch_img's **kwargs to imshow --
        NOTE(review): confirm the installed matplotlib accepts a 'clim'
        keyword on imshow; otherwise this raises for non-None clim.
    :return: the axes the grid was drawn on
    """
    grid = make_grid(imgs_list, nrow=nrow)
    if ax is None:
        fig, ax = plt.subplots()
    # 'im' (the AxesImage) is currently unused; kept for parity with callers.
    im = plot_pytorch_img(grid, ax, clim=clim)
    if not visualize:
        plt.savefig(filename, dpi=dpi)
    return ax
| 34.431373 | 109 | 0.624573 |
a360f08caff10c87d7e6edca7172a0cd69075388 | 25,128 | py | Python | legacy/adobe_tools/adobe_api.py | gbatye/IT-CPE | 60f5ad3b7f7727efada2387db359962240d03e59 | [
"Apache-2.0"
] | 506 | 2015-01-02T11:47:38.000Z | 2022-03-28T14:21:16.000Z | legacy/adobe_tools/adobe_api.py | gbatye/IT-CPE | 60f5ad3b7f7727efada2387db359962240d03e59 | [
"Apache-2.0"
] | 154 | 2015-12-09T23:58:03.000Z | 2022-02-27T22:48:36.000Z | legacy/adobe_tools/adobe_api.py | gbatye/IT-CPE | 60f5ad3b7f7727efada2387db359962240d03e59 | [
"Apache-2.0"
] | 173 | 2015-01-25T08:56:03.000Z | 2022-03-15T17:33:46.000Z | #!/usr/bin/python
# Copyright (c) Facebook, Inc. and its affiliates.
"""Module to interact with the Adobe User Management API."""
from __future__ import print_function
import json
import os
import platform
import random
import sys
import time
try:
import jwt
import requests
except ImportError:
print("Missing 'jwt' and/or 'requests' modules.")
exit(1)
# Python 2 / Python 3 compatibility imports.
if sys.version_info[0] == 2:
    from ConfigParser import RawConfigParser
    from urllib import urlencode
    from urllib import quote
elif sys.version_info[0] >= 3:
    from configparser import RawConfigParser
    from urllib.parse import urlencode
    # Bug fix: quote() is used by AdobeAPIObject.users_of_product but was
    # previously imported only on the Python 2 branch, so calling that
    # method on Python 3 raised NameError.
    from urllib.parse import quote

# Constants for fallback
USERCONFIG_DEFAULT_LOC = '/Library/Adobe/usermanagement.config'
PRIVATE_KEY_DEFAULT_LOC = '/Library/Adobe/private.key'
CACHE_DEFAULT_LOC = '/Library/Adobe/adobe_tools.json'
# User lookup functions
def get_console_user():
    """Return the username of the currently logged-in console user.

    Uses the platform-appropriate mechanism; returns None on an
    unrecognized operating system.
    """
    system_name = platform.system()
    if 'Darwin' in system_name:
        # macOS: ask the SystemConfiguration framework who owns the console.
        from SystemConfiguration import SCDynamicStoreCopyConsoleUser
        return SCDynamicStoreCopyConsoleUser(None, None, None)[0]
    if 'Windows' in system_name:
        from win32api import GetUserName
        return GetUserName()
    if 'Linux' in system_name:
        from getpass import getuser
        return getuser()
# Exception classes used by this module.
class AdobeAPINoUserException(Exception):
    """Raised when a lookup targets a user that does not exist."""

    def __init__(self, username):
        """Remember which user could not be found."""
        self.username = username

    def __str__(self):
        """Human-readable message naming the missing user."""
        # Trailing space preserved for message compatibility.
        return "No user found for '" + str(self.username) + "' "
class AdobeAPINoProductException(Exception):
    """Raised when a product configuration name cannot be found."""

    def __init__(self, product):
        """Remember which product configuration was missing."""
        self.product = product

    def __str__(self):
        """Human-readable message naming the missing product config."""
        return "No product configuration for '" + str(self.product) + "'"
class AdobeAPIBadStatusException(Exception):
    """Raised when the API answers with a non-200 HTTP status."""

    def __init__(self, status_code, headers, text):
        """Capture the HTTP response details for later inspection."""
        self.status_code = status_code
        self.headers = headers
        self.text = text

    def __str__(self):
        """Message combining the status code and the response body."""
        return 'Status code {0}: {1}'.format(self.status_code, str(self.text))

    def __int__(self):
        """The numeric HTTP status code of the failed response."""
        return int(self.status_code)
class AdobeAPIIncompleteUserActionException(Exception):
    """Raised when a user-manipulation action was only partially completed."""

    def __init__(self, errors):
        """Keep the error payload returned by the API."""
        self.errors = errors

    def __str__(self):
        """Message is simply the stringified error payload."""
        return str(self.errors)
class AdobeAPIMissingRequirementsException(Exception):
    """Raised when a file required for API usage is missing."""

    def __init__(self, filename):
        """Remember which required file was absent."""
        self.filename = filename

    def __str__(self):
        """Human-readable message naming the missing file."""
        return 'Required file is missing: {0}'.format(str(self.filename))
class AdobeAPIObject(object):
    """Model to represent an Adobe API interface."""

    def __init__(
        self,
        username=None,
        private_key_filename=PRIVATE_KEY_DEFAULT_LOC,
        userconfig=USERCONFIG_DEFAULT_LOC,
        cache_path=CACHE_DEFAULT_LOC,
        cache=True,
        key='email',
        allow_nonexistent_user=False,
        splay=None,
    ):
        """
        Instantiate class variables for our API object model.

        'username' defaults to the current logged in user on all platforms.

        'private_key_filename', 'userconfig', and 'cache_path' will default to
        the constants defined above if not provided.

        'cache' defaults to True to consume available cache data, and to store
        the data in local cache. False will not cache and ignores any local
        cache file.
        The cache path is defined in the constant above.

        'key' must be either 'email' or 'username', and determines what field
        to match the incoming data off of. By default, this is the 'email'
        field.

        'allow_nonexistent_user' will not trigger an exception if you try to
        perform an action on a user that does not exist. This is useful for
        determining if a user exists, or querying lists of product configs,
        where you don't actually need to interact with a user to do so.

        'splay' is a number of hours added to the cache length. By default,
        this is a random value between -144 and 144 hours, so that machines
        don't all invalidate their cache and query the API endpoint at the
        same time.

        This can be confusing because regardless of key choice, 'username' is
        used to indicate the unique user.
        """
        # Bug fix: the username and splay defaults were previously computed
        # once at import time (computed-default-argument bug), so every
        # instance in a process shared a single splay value. None sentinels
        # defer the computation to construction time; effective behavior for
        # callers is unchanged.
        if username is None:
            username = "%s@fb.com" % get_console_user()
        if splay is None:
            splay = random.randrange(-144, 144)
        self.configs = {}
        self.productlist = []
        self.userlist = []
        self.cache_path = cache_path
        self.user = {}
        self.username = username
        self.cache = cache
        self.key = key
        self.allow_fake = allow_nonexistent_user
        self.splay = splay
        if self.cache:
            self.__read_cache()
        # Generate the access configs in case we need them later
        self.__generate_config(
            userconfig=userconfig,
            private_key_filename=private_key_filename
        )
        if not self.user:
            # Cache didn't have values we need, so let's query the API
            self.gather_user()
        if not self.productlist:
            self.gather_product_list(force=True)
        if self.cache:
            self.__write_cache()

    # CONFIG
    def __get_private_key(self, priv_key_filename):
        """Retrieve private key from file."""
        # 'with' guarantees the handle is closed even if read() raises.
        with open(priv_key_filename) as priv_key_file:
            return priv_key_file.read()

    def __get_user_config(self, filename=None):
        """Retrieve config data from file."""
        config = RawConfigParser()
        config.read(filename)
        config_dict = {
            # server parameters
            'host': config.get("server", "host"),
            'endpoint': config.get("server", "endpoint"),
            'ims_host': config.get("server", "ims_host"),
            'ims_endpoint_jwt': config.get("server", "ims_endpoint_jwt"),
            # enterprise parameters used to construct JWT
            'domain': config.get("enterprise", "domain"),
            'org_id': config.get("enterprise", "org_id"),
            'api_key': config.get("enterprise", "api_key"),
            'client_secret': config.get("enterprise", "client_secret"),
            'tech_acct': config.get("enterprise", "tech_acct"),
            'priv_key_filename': config.get("enterprise", "priv_key_filename"),
        }
        self.configs = config_dict

    def __prepare_jwt_token(self):
        """Construct the JSON Web Token for auth."""
        # set expiry time for JSON Web Token
        expiry_time = int(time.time()) + 60 * 60 * 24
        # create payload
        payload = {
            "exp": expiry_time,
            "iss": self.configs['org_id'],
            "sub": self.configs['tech_acct'],
            "aud": (
                "https://" +
                self.configs['ims_host'] +
                "/c/" +
                self.configs['api_key']
            ),
            (
                "https://" +
                self.configs['ims_host'] +
                "/s/" +
                "ent_user_sdk"
            ): True
        }
        # create JSON Web Token
        jwt_token = jwt.encode(payload, self.priv_key, algorithm='RS256')
        # decode bytes into string
        jwt_token = jwt_token.decode("utf-8")
        return jwt_token

    def __prepare_access_token(self, config_data, jwt_token):
        """Generate the access token."""
        # Method parameters
        url = "https://" + config_data['ims_host'] + \
            config_data['ims_endpoint_jwt']
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "Cache-Control": "no-cache"
        }
        body_credentials = {
            "client_id": config_data['api_key'],
            "client_secret": config_data['client_secret'],
            "jwt_token": jwt_token
        }
        body = urlencode(body_credentials)
        # send http request
        res = requests.post(url, headers=headers, data=body)
        # evaluate response
        if res.status_code == 200:
            # extract token
            access_token = json.loads(res.text)["access_token"]
            return access_token
        else:
            raise AdobeAPIBadStatusException(
                res.status_code, res.headers, res.text
            )

    def __generate_config(self, userconfig, private_key_filename):
        """Return tuple of necessary config data."""
        # Get userconfig data
        user_config_path = userconfig
        if not os.path.isfile(str(user_config_path)):
            raise AdobeAPIMissingRequirementsException(str(user_config_path))
        # Get private key
        priv_key_path = private_key_filename
        if not os.path.isfile(str(priv_key_path)):
            raise AdobeAPIMissingRequirementsException(str(priv_key_path))
        self.priv_key = self.__get_private_key(priv_key_path)
        # Get config data
        self.__get_user_config(user_config_path)
        # Get the JWT
        try:
            self.jwt_token = self.__prepare_jwt_token()
        except NotImplementedError:
            print(
                "Cryptography module was unable to succeed on your machine.",
                file=sys.stderr)
            raise
        # Get the access token
        self.access_token = self.__prepare_access_token(
            self.configs,
            self.jwt_token
        )

    def __headers(self, config_data, access_token):
        """Return the headers needed."""
        headers = {
            "Content-type": "application/json",
            "Accept": "application/json",
            "x-api-key": config_data['api_key'],
            "Authorization": "Bearer " + access_token
        }
        return headers

    # REQUEST INTERACTION FUNCTIONS
    def __submit_request(self, url):
        """
        Submit a request to the API endpoint.

        Returns a JSON dictionary of the result.
        If a non-200 status is returned, raise an AdobeAPIBadStatusException.
        """
        res = requests.get(
            url,
            headers=self.__headers(self.configs, self.access_token)
        )
        if res.status_code != 200:
            raise AdobeAPIBadStatusException(
                res.status_code,
                res.headers,
                res.text
            )
        return json.loads(res.text)

    def _submit_user_action_request(self, body_dict):
        """
        Submit a JSON request to the User Action API.

        Returns True if the action succeeded.
        If the action was not completed, raise
        AdobeAPIIncompleteUserActionException.
        """
        success = False
        body = json.dumps([body_dict])
        url = "https://" + self.configs['host'] + \
            self.configs['endpoint'] + "/action/" + \
            self.configs['org_id']
        res = requests.post(
            url,
            headers=self.__headers(self.configs, self.access_token),
            data=body
        )
        if res.status_code != 200:
            raise AdobeAPIBadStatusException(
                res.status_code,
                res.headers,
                res.text
            )
        results = json.loads(res.text)
        if results.get('notCompleted') == 1:
            raise AdobeAPIIncompleteUserActionException(
                results.get('errors')
            )
        if results.get('completed') == 1:
            success = True
            self.update_user()
        return success

    # CACHE FUNCTIONS
    def __read_cache(self):
        """Read the values from the cache file."""
        cache_data = {}
        try:
            # Invalidate the cache automatically after 2 weeks, plus splay
            file_age = os.path.getmtime(self.cache_path)
            # Splay is a number of hours added to the cache invalidation time
            # It can be negative, so that clients don't all hit at once.
            splay_seconds = 60 * 60 * int(self.splay)
            two_weeks = (60 * 60 * 24 * 14)
            if time.time() - file_age < (two_weeks + splay_seconds):
                # Bug fix: open in text mode; the cache is JSON text (and the
                # matching writer below writes text).
                with open(self.cache_path, 'r') as f:
                    cache_data = json.load(f)
        except (OSError, IOError, ValueError):
            # Cache doesn't exist, or is invalid
            self.user = {}
            return
        productlist = cache_data.get('productlist', [])
        if productlist:
            self.productlist = productlist
        userlist = cache_data.get('userlist', [])
        if userlist:
            self.userlist = userlist
        user_data = cache_data.get('user_data', {})
        if user_data and user_data.get(self.key) == self.username:
            self.user = user_data
        else:
            # Look through the userlist to see if we find the username.
            # If not, the result is an empty dict anyway.
            self.user = self.data()

    def __write_cache(self):
        """Write the values to the cache file."""
        cache_data = {}
        cache_data['productlist'] = self.productlist or []
        cache_data['userlist'] = self.userlist or []
        cache_data['user_data'] = self.user or {}
        try:
            # Bug fix: json.dump writes str, so the file must be opened in
            # text mode ('w'); 'wb' raised TypeError on Python 3.
            with open(self.cache_path, 'w') as f:
                json.dump(cache_data, f, indent=True, sort_keys=True)
        except (IOError, OSError):
            # If we fail to write cache, it just means we check again next time
            pass

    # GATHERING DATA FROM THE API
    # These functions all must query the API (directly or indirectly) for info
    # not available from the cache, and are therefore expensive.
    def gather_product_list(self, force=False):
        """
        Get the list of product configurations by asking the API.

        Returns 'productlist', which is a list of dictionaries containing all
        the Configuration groups in use.

        If 'force' is true, the API call will be made regardless of cache.

        If a non-200 status code is returned by the API, an exception is
        raised.

        Example:
        ```
        >>>> api.productlist[0]
        {u'memberCount': 182, u'groupName': u'Administrators'}
        >>> api.productlist[1]
        {u'memberCount': 912,
        u'groupName':
            u'Default Document Cloud for enterprise - Pro Configuration'}
        ```
        """
        if force or not self.productlist:
            page = 0
            result = {}
            productlist = []
            # The groups endpoint is paginated; keep fetching until the API
            # reports the last page.
            while result.get('lastPage', False) is not True:
                url = "https://" + self.configs['host'] + \
                    self.configs['endpoint'] + "/groups/" + \
                    self.configs['org_id'] + "/" + str(page)
                try:
                    result = self.__submit_request(url)
                    productlist += result.get('groups', [])
                    page += 1
                except AdobeAPIBadStatusException:
                    raise
            self.productlist = productlist
            # Update the cache
            if self.cache:
                self.__write_cache()
        return self.productlist

    def gather_user_list(self, force=False):
        """
        Get a list of all users by querying the API.

        Returns 'userlist', which is a list of dictionaries containing all the
        users in our org.

        If 'force' is true, the API call will be made regardless of cache.

        If a non-200 status code is returned by the API, an exception is
        raised.

        Example:
        ```
        >>> api.userlist[0]
        {u'status':
            u'active', u'username': u'email@fb.com', u'domain': u'fb.com',
            u'firstname': u'Fake Firstname', u'lastname': u'Fake Lastname',
            u'groups': [
                u'Default Document Cloud for enterprise - Pro Configuration',
                u'Default All Apps plan - 100 GB Configuration',
                u'Default Illustrator CC - 0 GB Configuration',
                u'Default InDesign CC - 0 GB Configuration',
                u'Default Photoshop CC - 0 GB Configuration'],
            u'country': u'US', u'type': u'federatedID', u'email': u'email@fb.com'}
        ```
        """
        if force or not self.userlist:
            page = 0
            result = {}
            userlist = []
            # The users endpoint is paginated; keep fetching until the API
            # reports the last page.
            while result.get('lastPage', False) is not True:
                url = "https://" + self.configs['host'] + \
                    self.configs['endpoint'] + "/users/" + \
                    self.configs['org_id'] + "/" + str(page)
                try:
                    result = self.__submit_request(url)
                    userlist += result.get('users', [])
                    page += 1
                except AdobeAPIBadStatusException:
                    raise
            self.userlist = userlist
            # Update the cache
            if self.cache:
                self.__write_cache()
        return self.userlist

    def users_of_product(self, product_config_name):
        """
        Get a list of users of a specific configuration by querying the API.

        'userlist' is a list of dictionaries containing the user data of each
        user who is a member of that product configuration group.

        If a non-200 status code is returned by the API, an exception is
        raised.

        Example:
        ```
        >>> api.users_of_product(
            'Default Document Cloud for enterprise - Pro Configuration')[0]
        {u'status': u'active', u'username': u'email@fb.com',
        u'domain': u'fb.com', u'firstname': u'Fake', u'lastname': u'Fake',
        u'country': u'US', u'type': u'federatedID', u'email': u'email@fb.com'}
        ```

        This data is not cached, so it is an expensive call each time.
        """
        page = 0
        result = {}
        userlist = []
        while result.get('lastPage', False) is not True:
            # quote() escapes spaces etc. in the group name for the URL path.
            url = "https://" + self.configs['host'] + \
                self.configs['endpoint'] + "/users/" + \
                self.configs['org_id'] + "/" + str(page) + "/" + \
                quote(product_config_name)
            try:
                result = self.__submit_request(url)
                userlist += result.get('users', [])
                page += 1
            except AdobeAPIBadStatusException as e:
                error = json.loads(e.text)
                if 'group.not_found' in error['result']:
                    # Invalid product name
                    raise AdobeAPINoProductException(product_config_name)
                else:
                    raise
        return userlist

    def data(self):
        """Get the data for the user from the userlist."""
        for user in self.userlist:
            if user[self.key] == self.username:
                return user
        # If we get here, there was no matching username
        return {}

    def gather_user(self):
        """
        Gather data about the user by querying the API.

        Returns a dictionary containing the user data.

        If a non-200 status code is returned by the API, an exception is
        raised.

        This data is cached, but this function does not read from the cache;
        it will always fetch from the API.

        If the user does not exist and 'allow_nonexistent_user' was not set to
        True, this raises an AdobeAPINoUserException.
        """
        url = "https://" + self.configs['host'] + \
            self.configs['endpoint'] + "/organizations/" + \
            self.configs['org_id'] + "/users/" + str(self.username)
        try:
            result = self.__submit_request(url)
            self.user = result.get('user', {})
        except AdobeAPIBadStatusException:
            if self.allow_fake:
                self.user = {}
                return
            raise AdobeAPINoUserException(self.username)

    # USER SPECIFIC FUNCTIONS
    # These convenience functions are all based on the user that the object was
    # instantiated with.
    def list_products(self):
        """Return the list of products for the current user."""
        return self.user.get('groups', [])

    def is_federated(self):
        """Return True if user is federated."""
        return self.user.get('type') == 'federatedID'

    def has_product(self, product_name):
        """Return True if user has the product config."""
        return product_name in self.list_products()

    def update_user(self):
        """Force update the user information."""
        # Rebuild the userlist for updated information
        self.gather_user()
        if self.cache:
            self.__write_cache()

    # PRODUCT SPECIFIC FUNCTIONS
    # These are not at all related to the user, and do not require a real user.
    def product_exists(self, productname):
        """Return True if a product config exists."""
        if not self.productlist:
            self.gather_product_list()
        for product in self.productlist:
            if productname == product.get('groupName', ''):
                return True
        return False

    # ACTION FUNCTIONS
    # These functions are actions you can take on the user, which require
    # posting data to the API.
    def add_federated_user(self, email, country, firstname, lastname):
        """Add Federated user to organization."""
        add_dict = {
            'user': self.username,
            'do': [
                {
                    'createFederatedID': {
                        'email': email,
                        'country': country,
                        'firstname': firstname,
                        'lastname': lastname,
                    }
                }
            ]
        }
        result = self._submit_user_action_request(add_dict)
        return result

    def update_user_information(self, email, country, firstname, lastname):
        """Update the existing user's information."""
        add_dict = {
            'user': self.username,
            'do': [
                {
                    'update': {
                    }
                }
            ]
        }
        # Only include the fields the caller actually provided.
        if email:
            add_dict['do'][0]['update']['email'] = email
        if country:
            add_dict['do'][0]['update']['country'] = country
        if firstname:
            add_dict['do'][0]['update']['firstname'] = firstname
        if lastname:
            add_dict['do'][0]['update']['lastname'] = lastname
        result = self._submit_user_action_request(add_dict)
        return result

    def remove_user_from_org(self):
        """Remove user from organization."""
        if not self.user:
            raise AdobeAPINoUserException(self.username)
        remove_dict = {
            'user': self.username,
            'do': [
                {
                    'removeFromOrg': {}
                }
            ]
        }
        result = self._submit_user_action_request(remove_dict)
        return result

    def add_products_to_user(self, products):
        """Add product configs to username."""
        # Is username in the organization?
        if not self.user:
            raise AdobeAPINoUserException(self.username)
        # Bug fix: 'basestring' only exists on Python 2 and raised NameError
        # on Python 3; pick the right string type(s) at runtime.
        string_types = (str,) if sys.version_info[0] >= 3 else (basestring,)  # noqa: F821
        if isinstance(products, string_types):
            products = [products]
        # Is the product real?
        for product in products:
            if not self.product_exists(product):
                raise AdobeAPINoProductException(product)
        add_dict = {
            'user': self.username,
            'do': [
                {
                    'add': {
                        'product': products
                    }
                }
            ]
        }
        return self._submit_user_action_request(add_dict)

    def remove_product_from_user(self, products):
        """Remove products from username."""
        # Is username in the organization?
        if not self.user:
            raise AdobeAPINoUserException(self.username)
        # Bug fix: 'basestring' only exists on Python 2 and raised NameError
        # on Python 3; pick the right string type(s) at runtime.
        string_types = (str,) if sys.version_info[0] >= 3 else (basestring,)  # noqa: F821
        if isinstance(products, string_types):
            products = [products]
        # Is the product real?
        for product in products:
            if not self.product_exists(product):
                raise AdobeAPINoProductException(product)
        add_dict = {
            'user': self.username,
            'do': [
                {
                    'remove': {
                        'product': products
                    }
                }
            ]
        }
        return self._submit_user_action_request(add_dict)
| 35.441467 | 79 | 0.571036 |
d6276711d4c49a27d2f9562cc7f9e7e4f98eb86d | 1,482 | py | Python | yt/visualization/tests/test_commons.py | lconaboy/yt | d97c3cf6d7911cd12b8337784d3232068ebc59f6 | [
"BSD-3-Clause-Clear"
] | 360 | 2017-04-24T05:06:04.000Z | 2022-03-31T10:47:07.000Z | yt/visualization/tests/test_commons.py | chrishavlin/yt | 023680e3a7bd1000d601727e02a55e72b4cbdc75 | [
"BSD-3-Clause-Clear"
] | 2,077 | 2017-04-20T20:36:07.000Z | 2022-03-31T16:39:43.000Z | yt/visualization/tests/test_commons.py | chrishavlin/yt | 023680e3a7bd1000d601727e02a55e72b4cbdc75 | [
"BSD-3-Clause-Clear"
] | 257 | 2017-04-19T20:52:28.000Z | 2022-03-29T12:23:52.000Z | import pytest
from yt.visualization._commons import validate_image_name
@pytest.mark.parametrize(
    "name, expected",
    [
        ("noext", "noext.png"),
        ("nothing.png", "nothing.png"),
        ("nothing.pdf", "nothing.pdf"),
        ("version.1.2.3", "version.1.2.3.png"),
    ],
)
def test_default(name, expected):
    # Without an explicit suffix, ".png" is appended to extensionless names;
    # recognized extensions are kept as-is.
    assert validate_image_name(name) == expected
@pytest.mark.parametrize(
    "name, suffix, expected",
    [
        ("noext", ".png", "noext.png"),
        ("noext", None, "noext.png"),
        ("nothing.png", ".png", "nothing.png"),
        ("nothing.png", None, "nothing.png"),
        ("nothing.png", ".pdf", "nothing.pdf"),
        ("nothing.pdf", ".pdf", "nothing.pdf"),
        ("nothing.pdf", None, "nothing.pdf"),
        ("nothing.pdf", ".png", "nothing.png"),
        ("version.1.2.3", ".png", "version.1.2.3.png"),
        ("version.1.2.3", None, "version.1.2.3.png"),
        ("version.1.2.3", ".pdf", "version.1.2.3.pdf"),
    ],
)
@pytest.mark.filterwarnings(
    r"ignore:Received two valid image formats '\w+' \(from filename\) "
    r"and '\w+' \(from suffix\). The former is ignored.:UserWarning"
)
def test_custom_valid_ext(name, suffix, expected):
    # An explicit suffix wins over the filename's extension.
    assert validate_image_name(name, suffix=suffix) == expected
    if suffix is not None:
        # The suffix may also be given without its leading dot.
        dotless = suffix.replace(".", "")
        assert validate_image_name(name, suffix=dotless) == expected
0d11df603e120f69d1b4d9c103acf1e1a0753272 | 3,210 | py | Python | metapredict/tests/test_uniprot_functionality.py | alexpmagalhaes/metapredict | 2041787a4f48d6d7c98fb3d396455d059e67a08e | [
"MIT"
] | 5 | 2021-06-02T16:32:13.000Z | 2022-02-02T13:18:35.000Z | metapredict/tests/test_uniprot_functionality.py | alexpmagalhaes/metapredict | 2041787a4f48d6d7c98fb3d396455d059e67a08e | [
"MIT"
] | 2 | 2021-08-31T13:13:39.000Z | 2022-02-15T22:32:46.000Z | metapredict/tests/test_uniprot_functionality.py | alexpmagalhaes/metapredict | 2041787a4f48d6d7c98fb3d396455d059e67a08e | [
"MIT"
] | 2 | 2021-09-21T23:45:56.000Z | 2022-02-02T12:01:59.000Z | """
Unit and regression test for the metapredict package.
This is extremely underdone at this point... Sorry about that :'(
"""
# Import package, test suite, and other packages as needed
from metapredict import meta
from metapredict.metapredict_exceptions import MetapredictError
import pytest
import sys
import os
import numpy as np
# Test fixtures: a FASTA file next to the working directory and the UniProt
# accession for human p53, used as a stable reference sequence below.
current_filepath = os.getcwd()
fasta_filepath = "{}/testing.fasta".format(current_filepath)
P53_UID = 'P04637'
def test_metapredict_imported():
    """Smoke test: passes whenever the top-of-file import succeeded."""
    # sys.modules maps every successfully imported module name to its module
    # object, so ``from metapredict import meta`` above implies this key.
    loaded_modules = sys.modules
    assert "metapredict" in loaded_modules
# ....................................................................................
#
def test_predict_disorder_uniprot():
    """Exercise predict_disorder_uniprot against live UniProt data (p53)."""
    # checks that this fails when an invalid uniprot accession is passed
    with pytest.raises(MetapredictError):
        meta.predict_disorder_uniprot('aaaa')
    # checks that when we pull p53 we get 393 residues of sweet,
    # sweet disorder prediction
    assert len(meta.predict_disorder_uniprot(P53_UID)) == 393
    # check summed disorder is right
    assert np.sum(meta.predict_disorder_uniprot(P53_UID)) == 172.965
    # check summed disorder is right when we don't normalize (these are not magic values,
    # just the expected 'truth' for the 1.0 release
    assert np.sum(meta.predict_disorder_uniprot(P53_UID,normalized=False)) == 173.524
# ....................................................................................
#
def test_graph_disorder_uniprot_():
    """Check that graphing rejects an invalid UniProt accession."""
    # NOTE(review): name has a trailing underscore — presumably a typo kept
    # for pytest discovery compatibility; verify before renaming.
    # checks that this fails when an invalid uniprot accession is passed
    with pytest.raises(MetapredictError):
        meta.graph_disorder_uniprot('aaaa')
    # probably should have some tests here...?
# ....................................................................................
#
def test_predict_disorder_domains_uniprot_():
    """Check disorder-domain decomposition of p53 fetched from UniProt.

    The return value is indexed positionally: [0] raw scores, [1] smoothed
    scores, [2] IDR triples (start, end, sequence), [3] folded-domain triples.
    Expected values are the frozen 'truth' for this release.
    """
    # checks that this fails when an invalid uniprot accession is passed
    with pytest.raises(MetapredictError):
        meta.predict_disorder_domains_uniprot('aaaa')
    # checks that when we pull p53 we get 393 residues of sweet,
    # sweet disorder prediction
    dis_domains = meta.predict_disorder_domains_uniprot(P53_UID)
    assert len(dis_domains[0]) == 393
    assert len(dis_domains[1]) == 393
    assert np.sum(dis_domains[0]) == 172.965
    assert np.sum(dis_domains[1]) == 173.04537763974946
    # did we find 2 IDRs
    assert len(dis_domains[2]) == 2
    # IDR1
    assert dis_domains[2][0][2] == 'MEEPQSDPSVEPPLSQETFSDLWKLLPENNVLSPLPSQAMDDLMLSPDDIEQWFTEDPGPDEAPRMPEAAPPVAPAPAAPTPAAPAPAPSWPLSSSVPSQKT'
    assert dis_domains[2][0][0] == 0
    assert dis_domains[2][0][1] == 102
    # IDR2
    assert dis_domains[2][1][2] == 'PGRDRRTEEENLRKKGEPHHELPPGSTKRALPNNTSSSPQPKKKPLDGEYFTLQIRGRERFEMFRELNEALELKDAQAGKEPGGSRAHSSHLKSKKGQSTSRHKKLMFKTEGPDSD'
    assert dis_domains[2][1][0] == 277
    assert dis_domains[2][1][1] == 393
    # FD1
    assert dis_domains[3][0][2] == 'YQGSYGFRLGFLHSGTAKSVTCTYSPALNKMFCQLAKTCPVQLWVDSTPPPGTRVRAMAIYKQSQHMTEVVRRCPHHERCSDSDGLAPPQHLIRVEGNLRVEYLDDRNTFRHSVVVPYEPPEVGSDCTTIHYNYMCNSSCMGGMNRRPILTIITLEDSSGNLLGRNSFEVRVCAC'
    assert dis_domains[3][0][0] == 102
    assert dis_domains[3][0][1] == 277
| 33.4375 | 212 | 0.686293 |
4b0194beef66d0b5acd486c4a44ea6149ec670d8 | 13,692 | py | Python | desktop/core/ext-py/Twisted/twisted/web/test/test_proxy.py | civascu/hue | 82f2de44789ff5a981ed725175bae7944832d1e9 | [
"Apache-2.0"
] | 19 | 2015-05-01T19:59:03.000Z | 2021-12-09T08:03:16.000Z | desktop/core/ext-py/Twisted/twisted/web/test/test_proxy.py | civascu/hue | 82f2de44789ff5a981ed725175bae7944832d1e9 | [
"Apache-2.0"
] | 1 | 2018-01-03T15:26:49.000Z | 2018-01-03T15:26:49.000Z | desktop/core/ext-py/Twisted/twisted/web/test/test_proxy.py | civascu/hue | 82f2de44789ff5a981ed725175bae7944832d1e9 | [
"Apache-2.0"
] | 30 | 2015-03-25T19:40:07.000Z | 2021-05-28T22:59:26.000Z | # Copyright (c) 2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test for L{twisted.web.proxy}.
"""
from twisted.trial.unittest import TestCase
from twisted.test.proto_helpers import StringTransportWithDisconnection
from twisted.internet.error import ConnectionDone
from twisted.web.resource import Resource
from twisted.web.server import Site
from twisted.web.proxy import ReverseProxyResource, ProxyClientFactory
from twisted.web.proxy import ProxyClient, ProxyRequest, ReverseProxyRequest
class FakeReactor(object):
    """
    Minimal stand-in for the Twisted reactor used by these tests.

    @ivar connect: every call made to C{connectTCP}, recorded as a
        C{[host, port, factory]} list in call order.
    @type connect: C{list}
    """

    def __init__(self):
        """
        Start with an empty log of connection attempts.
        """
        self.connect = []

    def connectTCP(self, host, port, factory):
        """
        Record the attempted outgoing connection instead of making one.
        """
        attempt = [host, port, factory]
        self.connect.append(attempt)
class ReverseProxyResourceTestCase(TestCase):
    """
    Tests for L{ReverseProxyResource}.
    """
    def _testRender(self, uri, expectedURI):
        """
        Check that a request pointing at C{uri} produce a new proxy connection,
        with the path of this request pointing at C{expectedURI}.
        """
        root = Resource()
        # FakeReactor intercepts connectTCP so no real socket is opened.
        reactor = FakeReactor()
        resource = ReverseProxyResource("127.0.0.1", 1234, "/path", reactor)
        root.putChild('index', resource)
        site = Site(root)
        transport = StringTransportWithDisconnection()
        channel = site.buildProtocol(None)
        channel.makeConnection(transport)
        # Clear the timeout if the tests failed
        self.addCleanup(channel.connectionLost, None)
        channel.dataReceived("GET %s HTTP/1.1\r\nAccept: text/html\r\n\r\n" %
                             (uri,))
        # Check that one connection has been created, to the good host/port
        self.assertEquals(len(reactor.connect), 1)
        self.assertEquals(reactor.connect[0][0], "127.0.0.1")
        self.assertEquals(reactor.connect[0][1], 1234)
        # Check the factory passed to the connect, and its given path
        factory = reactor.connect[0][2]
        self.assertIsInstance(factory, ProxyClientFactory)
        self.assertEquals(factory.rest, expectedURI)
        self.assertEquals(factory.headers["host"], "127.0.0.1:1234")
    def test_render(self):
        """
        Test that L{ReverseProxyResource.render} initiates a connection to the
        given server with a L{ProxyClientFactory} as parameter.
        """
        return self._testRender("/index", "/path")
    def test_renderWithQuery(self):
        """
        Test that L{ReverseProxyResource.render} passes query parameters to the
        created factory.
        """
        return self._testRender("/index?foo=bar", "/path?foo=bar")
    def test_getChild(self):
        """
        The L{ReverseProxyResource.getChild} method should return a resource
        instance with the same class as the originating resource, forward port
        and host values, and update the path value with the value passed.
        """
        resource = ReverseProxyResource("127.0.0.1", 1234, "/path")
        child = resource.getChild('foo', None)
        # The child should keep the same class
        self.assertIsInstance(child, ReverseProxyResource)
        self.assertEquals(child.path, "/path/foo")
        self.assertEquals(child.port, 1234)
        self.assertEquals(child.host, "127.0.0.1")
    def test_getChildWithSpecial(self):
        """
        The L{ReverseProxyResource} return by C{getChild} has a path which has
        already been quoted.
        """
        resource = ReverseProxyResource("127.0.0.1", 1234, "/path")
        # Space, slash and percent must come back URL-quoted in the path.
        child = resource.getChild(' /%', None)
        self.assertEqual(child.path, "/path/%20%2F%25")
class DummyParent(object):
    """
    Stand-in for a parent proxy request: exposes the channel it belongs to
    and that channel's transport.

    @ivar channel: the request channel.
    @ivar transport: the transport of the channel.
    """

    def __init__(self, channel):
        """
        Keep the channel and mirror its transport attribute.
        """
        self.channel, self.transport = channel, channel.transport
class DummyChannel(object):
    """
    Do-nothing HTTP channel: it merely exposes a transport and remembers
    why the connection was lost.

    @ivar transport: the transport used by the client.
    @ivar lostReason: the reason saved at connection lost.
    """

    def __init__(self, transport):
        """
        Store the transport; no loss reason has been recorded yet.
        """
        self.transport = transport
        self.lostReason = None

    def connectionLost(self, reason):
        """
        Remember why the connection went away so tests can assert on it.
        """
        self.lostReason = reason
class ProxyClientTestCase(TestCase):
    """
    Tests for L{ProxyClient}.
    """
    def _testDataForward(self, data, method="GET", body=""):
        """
        Build a fake proxy connection, and send C{data} over it, checking that
        it's forwarded to the originating request.
        """
        # Connect everything
        clientTransport = StringTransportWithDisconnection()
        serverTransport = StringTransportWithDisconnection()
        channel = DummyChannel(serverTransport)
        parent = DummyParent(channel)
        serverTransport.protocol = channel
        client = ProxyClient(method, '/foo', 'HTTP/1.0',
                             {"accept": "text/html"}, body, parent)
        clientTransport.protocol = client
        client.makeConnection(clientTransport)
        # Check data sent
        self.assertEquals(clientTransport.value(),
                          "%s /foo HTTP/1.0\r\n"
                          "connection: close\r\n"
                          "accept: text/html\r\n\r\n%s" % (method, body))
        # Fake an answer
        client.dataReceived(data)
        # Check that the data has been forwarded
        self.assertEquals(serverTransport.value(), data)
        # Dropping the client side must propagate ConnectionDone to the parent.
        clientTransport.loseConnection()
        self.assertIsInstance(channel.lostReason, ConnectionDone)
    def test_forward(self):
        """
        When connected to the server, L{ProxyClient} should send the saved
        request, with modifications of the headers, and then forward the result
        to the parent request.
        """
        return self._testDataForward("200 OK\r\nFoo: bar\r\n\r\nSome data\r\n")
    def test_postData(self):
        """
        Try to post content in the request, and check that the proxy client
        forward the body of the request.
        """
        return self._testDataForward(
            "200 OK\r\nFoo: bar\r\n\r\nSome data\r\n", "POST", "Some content")
    def test_statusWithMessage(self):
        """
        If the response contains a status with a message, it should be
        forwarded to the parent request with all the information.
        """
        return self._testDataForward("404 Not Found\r\n")
    def test_headersCleanups(self):
        """
        The headers given at initialization should be modified:
        B{proxy-connection} should be removed if present, and B{connection}
        should be added.
        """
        client = ProxyClient('GET', '/foo', 'HTTP/1.0',
                             {"accept": "text/html", "proxy-connection": "foo"}, '', None)
        self.assertEquals(client.headers,
                          {"accept": "text/html", "connection": "close"})
class ProxyClientFactoryTestCase(TestCase):
    """
    Tests for L{ProxyClientFactory}.
    """
    def test_connectionFailed(self):
        """
        Check that L{ProxyClientFactory.clientConnectionFailed} produces
        a B{501} response to the parent request.
        """
        serverTransport = StringTransportWithDisconnection()
        channel = DummyChannel(serverTransport)
        parent = DummyParent(channel)
        serverTransport.protocol = channel
        factory = ProxyClientFactory('GET', '/foo', 'HTTP/1.0',
                                     {"accept": "text/html"}, '', parent)
        # Both connector and reason are unused by the implementation here.
        factory.clientConnectionFailed(None, None)
        self.assertEquals(serverTransport.value(),
                          "HTTP/1.0 501 Gateway error\r\n"
                          "Content-Type: text/html\r\n\r\n"
                          "<H1>Could not connect</H1>")
        self.assertIsInstance(channel.lostReason, ConnectionDone)
    def test_buildProtocol(self):
        """
        L{ProxyClientFactory.buildProtocol} should produce a L{ProxyClient}
        with the same values of attributes (with updates on the headers).
        """
        factory = ProxyClientFactory('GET', '/foo', 'HTTP/1.0',
                                     {"accept": "text/html"}, 'Some data',
                                     None)
        proto = factory.buildProtocol(None)
        self.assertIsInstance(proto, ProxyClient)
        self.assertEquals(proto.command, 'GET')
        self.assertEquals(proto.rest, '/foo')
        self.assertEquals(proto.data, 'Some data')
        self.assertEquals(proto.headers,
                          {"accept": "text/html", "connection": "close"})
class ProxyRequestTestCase(TestCase):
    """
    Tests for L{ProxyRequest}.
    """
    def _testProcess(self, uri, expectedURI, method="GET", data=""):
        """
        Build a request pointing at C{uri}, and check that a proxied request
        is created, pointing a C{expectedURI}.
        """
        transport = StringTransportWithDisconnection()
        channel = DummyChannel(transport)
        reactor = FakeReactor()
        request = ProxyRequest(channel, False, reactor)
        request.gotLength(len(data))
        request.handleContentChunk(data)
        request.requestReceived(method, 'http://example.com%s' % (uri,),
                                'HTTP/1.0')
        # No port in the URL, so the default HTTP port 80 must be used.
        self.assertEquals(len(reactor.connect), 1)
        self.assertEquals(reactor.connect[0][0], "example.com")
        self.assertEquals(reactor.connect[0][1], 80)
        factory = reactor.connect[0][2]
        self.assertIsInstance(factory, ProxyClientFactory)
        self.assertEquals(factory.command, method)
        self.assertEquals(factory.version, 'HTTP/1.0')
        self.assertEquals(factory.headers, {'host': 'example.com'})
        self.assertEquals(factory.data, data)
        self.assertEquals(factory.rest, expectedURI)
        self.assertEquals(factory.father, request)
    def test_process(self):
        """
        L{ProxyRequest.process} should create a connection to the given server,
        with a L{ProxyClientFactory} as connection factory, with the correct
        parameters:
            - forward comment, version and data values
            - update headers with the B{host} value
            - remove the host from the URL
            - pass the request as parent request
        """
        return self._testProcess("/foo/bar", "/foo/bar")
    def test_processWithoutTrailingSlash(self):
        """
        If the incoming request doesn't contain a slash,
        L{ProxyRequest.process} should add one when instantiating
        L{ProxyClientFactory}.
        """
        return self._testProcess("", "/")
    def test_processWithData(self):
        """
        L{ProxyRequest.process} should be able to retrieve request body and
        to forward it.
        """
        return self._testProcess(
            "/foo/bar", "/foo/bar", "POST", "Some content")
    def test_processWithPort(self):
        """
        Check that L{ProxyRequest.process} correctly parse port in the incoming
        URL, and create a outgoing connection with this port.
        """
        transport = StringTransportWithDisconnection()
        channel = DummyChannel(transport)
        reactor = FakeReactor()
        request = ProxyRequest(channel, False, reactor)
        request.gotLength(0)
        request.requestReceived('GET', 'http://example.com:1234/foo/bar',
                                'HTTP/1.0')
        # That should create one connection, with the port parsed from the URL
        self.assertEquals(len(reactor.connect), 1)
        self.assertEquals(reactor.connect[0][0], "example.com")
        self.assertEquals(reactor.connect[0][1], 1234)
class DummyFactory(object):
    """
    Bare container exposing C{host} and C{port}, mimicking the factory a
    L{ReverseProxyRequest} reads its target address from.
    """

    def __init__(self, host, port):
        # Plain attribute storage; nothing else is needed by the tests.
        self.host, self.port = host, port
class ReverseProxyRequestTestCase(TestCase):
    """
    Tests for L{ReverseProxyRequest}.
    """
    def test_process(self):
        """
        L{ReverseProxyRequest.process} should create a connection to its
        factory host/port, using a L{ProxyClientFactory} instantiated with the
        correct parameters, and particulary set the B{host} header to the
        factory host.
        """
        transport = StringTransportWithDisconnection()
        channel = DummyChannel(transport)
        reactor = FakeReactor()
        request = ReverseProxyRequest(channel, False, reactor)
        # The request reads its target host/port from its factory attribute.
        request.factory = DummyFactory("example.com", 1234)
        request.gotLength(0)
        request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
        # Check that one connection has been created, to the good host/port
        self.assertEquals(len(reactor.connect), 1)
        self.assertEquals(reactor.connect[0][0], "example.com")
        self.assertEquals(reactor.connect[0][1], 1234)
        # Check the factory passed to the connect, and its headers
        factory = reactor.connect[0][2]
        self.assertIsInstance(factory, ProxyClientFactory)
        self.assertEquals(factory.headers, {'host': 'example.com'})
| 33.558824 | 79 | 0.625621 |
6e27c0316208589f74854b8dd45eda05558df267 | 1,874 | py | Python | tabletoprandom/dice/enumdie.py | Kairosite/Tabletop-Randomness | be9d61c5ff29594610e2e1d55b89023c39696263 | [
"MIT"
] | null | null | null | tabletoprandom/dice/enumdie.py | Kairosite/Tabletop-Randomness | be9d61c5ff29594610e2e1d55b89023c39696263 | [
"MIT"
] | null | null | null | tabletoprandom/dice/enumdie.py | Kairosite/Tabletop-Randomness | be9d61c5ff29594610e2e1d55b89023c39696263 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Set, List, Final
from enum import EnumMeta, IntEnum, Enum, unique, auto
from tabletoprandom.abstract.dice import FairDie, NumericDie
import random
@unique
class ElementValue(Enum):
    # The four classical elements; auto() assigns 1..4 in declaration order.
    FIRE = auto()
    WATER = auto()
    AIR = auto()
    EARTH = auto()
    @staticmethod
    def die_string() -> str:
        # Short tag used by EnumDie.__str__ to render this die as "dE".
        return "E"
@unique
class FudgeValue(IntEnum):
    # Faces of a Fudge/Fate die: +1, 0 and -1.
    PLUS = 1
    BLANK = 0
    MINUS = -1
    @staticmethod
    def die_string() -> str:
        # Short tag used by EnumDie.__str__ to render this die as "dF".
        return "F"
class EnumDie(FairDie[Enum]):
    """A fair die whose faces are the members of an arbitrary Enum."""

    face_enum: Final[EnumMeta]  # the Enum class supplying the faces

    def __init__(self, face_enum: EnumMeta) -> None:
        self.face_enum = face_enum

    @property
    def faces(self) -> Set[Enum]:
        """Return the set of possible faces (all members of the enum)."""
        return set(self.face_enum)

    @property
    def num_faces(self) -> int:
        """Return the number of faces on the die."""
        return len(self.face_enum)

    @staticmethod
    def quick_roll(face_enum: EnumMeta) -> Enum:
        """Roll a one-off die built from ``face_enum``.

        Bug fix: ``random.choice`` needs a positionally indexable sequence,
        but an Enum class only supports lookup by member *name*, so the
        previous ``random.choice(face_enum)`` raised ``KeyError``.
        Materialise the members into a list first.
        """
        return random.choice(list(face_enum))

    def __str__(self) -> str:
        # Prefer the enum's own short tag (e.g. "dF"); fall back to the
        # class name for enums that do not define ``die_string``.
        try:
            return f"d{self.face_enum.die_string()}"
        except AttributeError:
            return f"d({self.face_enum.__name__})"
class IntEnumDie(EnumDie, NumericDie[IntEnum]):
    """An EnumDie restricted to IntEnum faces, giving a numeric ordering."""

    face_enum: Final[EnumMeta]

    def __init__(self, face_enum: EnumMeta) -> None:
        # issubclass() is the idiomatic spelling of IntEnum.__subclasscheck__.
        if not issubclass(face_enum, IntEnum):
            raise ValueError("An IntEnum has not been given")
        self.face_enum = face_enum

    @property
    def face_order(self) -> List[IntEnum]:
        """Returns an ordered list of the die's faces"""
        # sorted() already produces a list; the extra list() copy was redundant.
        return sorted(self.face_enum)

    @staticmethod
    def quick_roll(face_enum: EnumMeta) -> IntEnum:
        """Roll a one-off die built from ``face_enum``.

        Bug fix: ``random.choice`` cannot index an Enum class directly
        (member access is by name), so convert the members to a list first.
        """
        if not issubclass(face_enum, IntEnum):
            raise ValueError("An IntEnum has not been given")
        return random.choice(list(face_enum))
89a565b4b37fda82dbfb4000716bd52de94a5511 | 1,256 | py | Python | tests/integration/test_backward_compatibility/test_cte_distributed.py | chalice19/ClickHouse | 2f38e7bc5c2113935ab86260439bb543a1737291 | [
"Apache-2.0"
] | 8,629 | 2016-06-14T21:03:01.000Z | 2019-09-23T07:46:38.000Z | tests/integration/test_backward_compatibility/test_cte_distributed.py | chalice19/ClickHouse | 2f38e7bc5c2113935ab86260439bb543a1737291 | [
"Apache-2.0"
] | 4,335 | 2016-06-15T12:58:31.000Z | 2019-09-23T11:18:43.000Z | tests/integration/test_backward_compatibility/test_cte_distributed.py | chalice19/ClickHouse | 2f38e7bc5c2113935ab86260439bb543a1737291 | [
"Apache-2.0"
] | 1,700 | 2016-06-15T09:25:11.000Z | 2019-09-23T11:16:38.000Z | import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__, name="cte_distributed")
node1 = cluster.add_instance("node1", with_zookeeper=False)
node2 = cluster.add_instance(
"node2",
with_zookeeper=False,
image="yandex/clickhouse-server",
tag="21.7.3.14",
stay_alive=True,
with_installed_binary=True,
)
@pytest.fixture(scope="module")
def start_cluster():
    """Module-scoped fixture: boot the cluster, always shut it down after."""
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
def test_cte_distributed(start_cluster):
    """Run the same CTE-over-remote() query on both server versions.

    The test passes if neither node errors: it guards against regressions in
    how WITH-clause aliases are propagated to distributed subqueries.
    """
    # Old-version node first.
    node2.query(
        """
WITH
    quantile(0.05)(cnt) as p05,
    quantile(0.95)(cnt) as p95,
    p95 - p05 as inter_percentile_range
SELECT
    sum(cnt) as total_requests,
    count() as data_points,
    inter_percentile_range
FROM (
    SELECT
        count() as cnt
    FROM remote('node{1,2}', numbers(10))
    GROUP BY number
)"""
    )
    # Same query against the current-version node.
    node1.query(
        """
WITH
    quantile(0.05)(cnt) as p05,
    quantile(0.95)(cnt) as p95,
    p95 - p05 as inter_percentile_range
SELECT
    sum(cnt) as total_requests,
    count() as data_points,
    inter_percentile_range
FROM (
    SELECT
        count() as cnt
    FROM remote('node{1,2}', numbers(10))
    GROUP BY number
)"""
    )
| 19.936508 | 61 | 0.649682 |
a16dc1ff4e3a4b009cf917a2d9c01dffc73b3fa6 | 1,064 | py | Python | main.py | Bdev-G/prototipo_01 | 93d2e918b7c6a70c067664c946529477736e78f1 | [
"MIT"
] | null | null | null | main.py | Bdev-G/prototipo_01 | 93d2e918b7c6a70c067664c946529477736e78f1 | [
"MIT"
] | null | null | null | main.py | Bdev-G/prototipo_01 | 93d2e918b7c6a70c067664c946529477736e78f1 | [
"MIT"
] | null | null | null | Player = sprites.create(img("""
. . . . c c c b b b b b . . . .
. . c c b 4 4 4 4 4 4 b b b . .
. c c 4 4 4 4 4 5 4 4 4 4 b c .
. e 4 4 4 4 4 4 4 4 4 5 4 4 e .
e b 4 5 4 4 5 4 4 4 4 4 4 4 b c
e b 4 4 4 4 4 4 4 4 4 4 5 4 4 e
e b b 4 4 4 4 4 4 4 4 4 4 4 b e
. e b 4 4 4 4 4 5 4 4 4 4 b e .
8 7 e e b 4 4 4 4 4 4 b e e 6 8
8 7 2 e e e e e e e e e e 2 7 8
e 6 6 2 2 2 2 2 2 2 2 2 2 6 c e
e c 6 7 6 6 7 7 7 6 6 7 6 c c e
e b e 8 8 c c 8 8 c c c 8 e b e
e e b e c c e e e e e c e b e e
. e e b b 4 4 4 4 4 4 4 4 e e .
. . . c c c c c e e e e e . . .
"""),
SpriteKind.player)
def on_on_update():
    """Move the player 1.5 px per frame in the held direction.

    NOTE(review): the elif chain means only one direction is applied per
    frame, so diagonal movement is impossible even when two buttons are
    held — confirm whether that is intended before changing to independent
    if statements.
    """
    if controller.right.is_pressed():
        Player.x += 1.5
    elif controller.left.is_pressed():
        Player.x -= 1.5
    elif controller.up.is_pressed():
        Player.y -= 1.5
    elif controller.down.is_pressed():
        Player.y += 1.5
# Run the handler once per game frame.
game.on_update(on_on_update)
b516e59a362cebf032278f26022a2f34d7d715d0 | 10,118 | py | Python | bot/commands.py | meooow25/cp-discord-bot | 4d25b51f9dc4dc44105a6cebeeaea9ef1191c8c1 | [
"MIT"
] | 11 | 2018-09-03T16:50:25.000Z | 2020-07-17T05:27:25.000Z | bot/commands.py | meooow25/cp-discord-bot | 4d25b51f9dc4dc44105a6cebeeaea9ef1191c8c1 | [
"MIT"
] | 5 | 2018-10-08T00:18:21.000Z | 2018-11-26T22:01:40.000Z | bot/commands.py | meooow25/cp-discord-bot | 4d25b51f9dc4dc44105a6cebeeaea9ef1191c8c1 | [
"MIT"
] | 1 | 2018-10-09T09:30:07.000Z | 2018-10-09T09:30:07.000Z | import copy
import logging
import time
from datetime import datetime, timedelta
from .discord import Channel
from . import command, paginator
logger = logging.getLogger(__name__)
@command.command(desc='Responds with boop')
async def beep(bot, args, message):
    """Liveness check command: replies with *boop*; accepts no arguments."""
    command.assert_arglen(args, 0, cmd=message.content)
    reply = {'content': '*boop*'}
    await bot.client.send_message(reply, message.channel_id)
@command.command(usage='help [cmd]',
                 desc='Displays information about commands. When `cmd` is provided, only displays '
                      'information about that command')
async def help(bot, args, message):
    """Send the full paginated help, or a single command's usage embed."""
    if not args:
        # No specific command requested: send the precomputed help message,
        # paginated so long help fits within Discord's embed limits.
        reply = bot.help_message
        await paginator.paginate_and_send(reply, bot, message.channel_id, per_page=4,
                                          time_active=15 * 60, time_delay=2 * 60)
    else:
        command.assert_arglen(args, 1, cmd=message.content)
        cmd_name = args.pop()
        cmd = bot.command_map.get(cmd_name)
        command.assert_not_none(cmd, msg=f'Unrecognized command "{cmd_name}"', cmd=message.content)
        field = cmd.embed_field_rep()
        field['name'] = 'Usage: ' + field['name']
        reply = {
            'embed': {
                'title': cmd_name,
                'fields': [field],
            }
        }
        await bot.client.send_message(reply, message.channel_id)
@command.command(desc='Displays bot info')
async def info(bot, args, message):
    """Send the bot's precomputed info message; accepts no arguments."""
    command.assert_arglen(args, 0, cmd=message.content)
    reply = bot.info_message
    await bot.client.send_message(reply, message.channel_id)
@command.command(usage='next [cnt] [at] [cc] [cf] [px]',
                 desc='Displays future contests. If `cnt` is absent, displays the next contest. '
                      'If `all`, displays all upcoming contests. If `day`, displays contests '
                      'which start within the next 24 hours. Optional site filters can be used, '
                      'where `at` = *AtCoder*, `cc` = *CodeChef* and `cf` = *Codeforces*')
async def next(bot, args, message):
    """List upcoming contests, optionally filtered by site and count mode."""
    args = [arg.lower() for arg in args]
    # Arguments are classified in one pass: site tags collect into the
    # filter map, 'all'/'day' set the count mode (at most one allowed).
    site_tag_to_name = {}
    cnt = None
    for arg in args:
        name = bot.site_container.get_site_name(arg)
        if name is not None:
            site_tag_to_name[arg] = name
        elif arg in ('all', 'day'):
            command.assert_none(cnt, msg='More than 1 cnt argument', cmd=message.content)
            cnt = arg
        else:
            raise command.IncorrectUsageException(msg=f'Unrecognized argument "{arg}"', cmd=message.content)
    # Default mode: just the single next contest.
    cnt = cnt or 1
    if cnt == 'day':
        start_max = datetime.now().timestamp() + timedelta(days=1).total_seconds()
        contests = bot.site_container.get_future_contests_before(start_max, site_tag_to_name.keys())
        logger.info(f'{len(contests)} contests fetched before {start_max}')
    else:
        contests = bot.site_container.get_future_contests_cnt(cnt, site_tag_to_name.keys())
        logger.info(f'{len(contests)} contests fetched out of {cnt}')
    if contests:
        reply = create_message_from_contests(contests, cnt, site_tag_to_name.values(), bot.TIMEZONE)
        await paginator.paginate_and_send(reply, bot, message.channel_id, per_page=bot.CONTESTS_PER_PAGE,
                                          time_active=15 * 60, time_delay=2 * 60)
    else:
        reply = {'content': '*No contest found*'}
        await bot.client.send_message(reply, message.channel_id)
def create_message_from_contests(contests, cnt, site_names, bot_timezone):
    """Build the Discord message dict describing *contests*.

    ``cnt`` selects the title ('day' vs anything else), ``site_names`` adds a
    "Showing only" note, and ``bot_timezone`` localizes start times.  Assumes
    at least one contest (callers only invoke this when contests were found).
    """
    em = '\u2001'  # em-space used for column padding inside code spans

    def fmt_start(ts):
        # Localized, human-readable start time, e.g. "01 Jan 70, 00:00".
        return datetime.fromtimestamp(ts, bot_timezone).strftime('%d %b %y, %H:%M')

    def fmt_duration(total_secs):
        days, secs = divmod(total_secs, 60 * 60 * 24)
        hrs, secs = divmod(secs, 60 * 60)
        mins, secs = divmod(secs, 60)
        text = f'{hrs}h {mins}m'
        return f'{days}d {text}' if days > 0 else text

    rows = [
        (c.name, c.site_name, fmt_start(c.start), fmt_duration(c.length), c.url)
        for c in contests
    ]
    # Pad the site and duration columns so the table lines up.
    site_width = max(len(row[1]) for row in rows)
    duration_width = max(len(row[3]) for row in rows)

    fields = []
    for name, site_name, start, duration, url in rows:
        value = (f'`{site_name.ljust(site_width, em)}{em}|'
                 f'{em}{start}{em}|'
                 f'{em}{duration.rjust(duration_width, em)}{em}|'
                 f'{em}`[`link \u25F3`]({url} "Link to contest page")')
        fields.append({'name': name, 'value': value})

    if cnt == 'day':
        title = 'Contests that start under 24 hours from now'
    else:
        title = 'Upcoming contests'
    embed = {'fields': fields}
    if site_names:
        embed['description'] = 'Showing only: ' + ', '.join(site_names)
    return {'content': f'*{title}*', 'embed': embed}
@command.command(desc='Displays bot status')
async def status(bot, args, message):
    """Reply with uptime and per-site last-fetch times; no arguments."""
    command.assert_arglen(args, 0, cmd=message.content)
    # Deep-copy the template so the appended fields don't accumulate
    # across invocations.
    reply = copy.deepcopy(bot.status_message)
    now = time.time()
    uptime = (now - bot.client.start_time) / 3600
    field1 = {
        'name': 'Bot Uptime',
        'value': f'Online since {uptime:.1f} hrs ago'
    }
    field2 = {
        'name': 'Last Updated',
        'value': '',
    }
    # TODO: Shift the code below to a member function of Site.
    for site in bot.site_container.sites:
        last = (now - site.contests_last_fetched) / 60
        field2['value'] += f'{site.NAME}: {last:.0f} mins ago\n'
    reply['embed']['fields'] += [field1, field2]
    await bot.client.send_message(reply, message.channel_id)
@command.command(usage='showsub [at|cc|cf]',
                 desc='Show registered profiles. A particular site can be specified',
                 allow_dm=True)
async def showsub(bot, args, message):
    """Reply with the caller's registered profiles.

    With no argument, shows profiles for every subscribed site; with a single
    site tag, shows only that site's profile.  Replies with a "not
    subscribed" notice when nothing is registered.
    """
    user_id = message.author.id
    user = bot.entity_manager.get_user(user_id)
    if not args:
        # No site given: show everything, or a notice when nothing is
        # registered.  (Merges the two previously duplicated reply branches.)
        embed = user.get_all_profiles_embed() if user is not None else None
        if not embed:
            reply = {'content': '*You are not subscribed to any site*'}
        else:
            reply = {
                'content': '*Your registered profiles:*',
                'embed': embed,
            }
        await bot.client.send_message(reply, message.channel_id)
        return
    command.assert_arglen(args, 1, cmd=message.content)
    site_tag = args[0].lower()
    site_name = bot.site_container.get_site_name(site_tag)
    command.assert_not_none(site_name, msg='Unrecognized site', cmd=message.content)
    # Reuse the already-fetched user object instead of re-querying the
    # entity manager (the original looked the user up three times).
    profile = user.get_profile_for_site(site_tag) if user is not None else None
    if profile is None:
        reply = {'content': f'*You are not subscribed to {site_name}*'}
        await bot.client.send_message(reply, message.channel_id)
        return
    embed = user.get_profile_embed(site_tag)
    reply = {'embed': embed}
    await bot.client.send_message(reply, message.channel_id)
@command.command(usage='sub at|cc|cf handle',
                 desc='Subscribe to profile changes',
                 allow_dm=True)
async def sub(bot, args, message):
    """Register the caller's handle on a site and confirm with the profile."""
    command.assert_arglen(args, 2, cmd=message.content)
    user_id = message.author.id
    site_tag = args[0].lower()
    site_name = bot.site_container.get_site_name(site_tag)
    command.assert_not_none(site_name, msg='Unrecognized site', cmd=message.content)
    # Fetching the remote profile can take a while; show a typing indicator.
    await bot.client.trigger_typing(message.channel_id)
    handle = args[1]
    profile = await bot.site_container.fetch_profile(handle, site_tag=site_tag)
    if profile is None:
        reply = {'content': '*No user found with given handle*'}
        await bot.client.send_message(reply, message.channel_id)
        return
    if bot.entity_manager.get_user(user_id) is None:
        # Register new user with DM channel ID.
        channel = await bot.get_channel(message.channel_id)
        if channel.type != Channel.Type.DM:
            channel = await bot.client.get_dm_channel(user_id)
        bot.entity_manager.create_user(user_id, channel.id)
    await bot.entity_manager.update_user_site_profile(user_id, profile)
    embed = bot.entity_manager.get_user(user_id).get_profile_embed(site_tag)
    reply = {
        'content': '*Your profile has been registered*',
        'embed': embed,
    }
    await bot.client.send_message(reply, message.channel_id)
@command.command(usage='unsub at|cc|cf',
                 desc='Unsubscribe from profile changes',
                 allow_dm=True)
async def unsub(bot, args, message):
    """Remove the caller's profile for the given site and confirm."""
    command.assert_arglen(args, 1, cmd=message.content)
    user_id = message.author.id
    site_tag = args[0].lower()
    site_name = bot.site_container.get_site_name(site_tag)
    command.assert_not_none(site_name, msg='Unrecognized site', cmd=message.content)
    user = bot.entity_manager.get_user(user_id)
    # Reuse the fetched user instead of re-querying the entity manager;
    # "unknown user" and "no profile for this site" send the same reply,
    # so the two previously duplicated branches are merged.
    profile = user.get_profile_for_site(site_tag) if user is not None else None
    if profile is None:
        reply = {'content': f'*You are not subscribed to {site_name}*'}
        await bot.client.send_message(reply, message.channel_id)
        return
    await bot.entity_manager.delete_user_site_profile(user_id, site_tag)
    reply = {'content': f'*You are now unsubscribed from {site_name}*'}
    await bot.client.send_message(reply, message.channel_id)
| 39.065637 | 108 | 0.639553 |
ca560d208992489d54ae1679dee20d5427926416 | 5,841 | py | Python | main.py | ArihantChawla/indian-alpr | 531dd815f0a5c819b411fd156aabca960d72b7b6 | [
"Apache-2.0"
] | null | null | null | main.py | ArihantChawla/indian-alpr | 531dd815f0a5c819b411fd156aabca960d72b7b6 | [
"Apache-2.0"
] | null | null | null | main.py | ArihantChawla/indian-alpr | 531dd815f0a5c819b411fd156aabca960d72b7b6 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2018 understand.ai
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from anonymizer.anonymization import Anonymizer
from anonymizer.detection import Detector, download_weights, get_weights_path
from anonymizer.obfuscation import Obfuscator
from anonymizer.cropping import Cropper
def parse_args():
    """Parse and echo the command-line options for the anonymizer.

    Returns the argparse namespace; all parsed values are also printed so
    runs are self-documenting in logs.
    """
    parser = argparse.ArgumentParser(
        description='Anonymize faces and license plates in a series of images.')
    parser.add_argument('--input', required=True,
                        metavar='/path/to/input_folder',
                        help='Path to a folder that contains the images that should be anonymized. '
                             'Images can be arbitrarily nested in subfolders and will still be found.')
    parser.add_argument('--image-output', required=True,
                        metavar='/path/to/output_foler',
                        help='Path to the folder the anonymized images should be written to. '
                             'Will mirror the folder structure of the input folder.')
    parser.add_argument('--weights', required=True,
                        metavar='/path/to/weights_foler',
                        help='Path to the folder where the weights are stored. If no weights with the '
                             'appropriate names are found they will be downloaded automatically.')
    parser.add_argument('--image-extensions', required=False, default='jpg,png',
                        metavar='"jpg,png"',
                        help='Comma-separated list of file types that will be anonymized')
    parser.add_argument('--face-threshold', type=float, required=False, default=0.3,
                        metavar='0.3',
                        help='Detection confidence needed to anonymize a detected face. '
                             'Must be in [0.001, 1.0]')
    parser.add_argument('--plate-threshold', type=float, required=False, default=0.3,
                        metavar='0.3',
                        help='Detection confidence needed to anonymize a detected license plate. '
                             'Must be in [0.001, 1.0]')
    # Paired flags toggling the same boolean destination; default is True.
    parser.add_argument('--write-detections', dest='write_detections', action='store_true')
    parser.add_argument('--no-write-detections', dest='write_detections', action='store_false')
    parser.set_defaults(write_detections=True)
    parser.add_argument('--obfuscation-kernel', required=False, default='21,1,9',
                        metavar='kernel_size,sigma,box_kernel_size',
                        help='This parameter is used to change the way the blurring is done. '
                             'For blurring a gaussian kernel is used. The default size of the kernel is 21 pixels '
                             'and the default value for the standard deviation of the distribution is 2. '
                             'Higher values of the first parameter lead to slower transitions while blurring and '
                             'larger values of the second parameter lead to sharper edges and less blurring. '
                             'To make the transition from blurred areas to the non-blurred image smoother another '
                             'kernel is used which has a default size of 9. Larger values lead to a smoother '
                             'transition. Both kernel sizes must be odd numbers.')
    args = parser.parse_args()
    print(f'input: {args.input}')
    print(f'image-output: {args.image_output}')
    print(f'weights: {args.weights}')
    print(f'image-extensions: {args.image_extensions}')
    print(f'face-threshold: {args.face_threshold}')
    print(f'plate-threshold: {args.plate_threshold}')
    print(f'write-detections: {args.write_detections}')
    print(f'obfuscation-kernel: {args.obfuscation_kernel}')
    print()
    return args
def main(input_path, image_output_path, weights_path, image_extensions, face_threshold, plate_threshold,
         write_json, obfuscation_parameters):
    """Run the anonymization pipeline: fetch weights, detect, obfuscate, crop.

    ``obfuscation_parameters`` is a comma-separated string of the form
    ``"kernel_size,sigma,box_kernel_size"`` (see the CLI help text).
    """
    # Make sure the detector weights are available before building anything.
    download_weights(download_directory=weights_path)

    kernel_str, sigma_str, box_str = obfuscation_parameters.split(',')
    obfuscator = Obfuscator(kernel_size=int(kernel_str), sigma=float(sigma_str),
                            box_kernel_size=int(box_str))

    # One detector and one confidence threshold per object kind.
    thresholds = {'face': face_threshold, 'plate': plate_threshold}
    detectors = {kind: Detector(kind=kind, weights_path=get_weights_path(weights_path, kind=kind))
                 for kind in thresholds}

    anonymizer = Anonymizer(obfuscator=obfuscator, detectors=detectors)
    anonymizer.anonymize_images(input_path=input_path, output_path=image_output_path,
                                detection_thresholds=thresholds,
                                file_types=image_extensions.split(','),
                                write_json=write_json)

    # Post-processing step on the anonymized output directory.
    cropper = Cropper(output_path=image_output_path)
    cropper.placeholder()
if __name__ == '__main__':
    # Parse CLI arguments and kick off the anonymization pipeline.
    args = parse_args()
    main(input_path=args.input, image_output_path=args.image_output, weights_path=args.weights,
         image_extensions=args.image_extensions,
         face_threshold=args.face_threshold, plate_threshold=args.plate_threshold,
         write_json=args.write_detections, obfuscation_parameters=args.obfuscation_kernel)
| 51.690265 | 115 | 0.659819 |
5dedcf7077e32554e1a637aaa26ff78e8e73abdd | 2,187 | py | Python | epytope/Data/pssms/epidemix/mat/A_25_10.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 7 | 2021-02-01T18:11:28.000Z | 2022-01-31T19:14:07.000Z | epytope/Data/pssms/epidemix/mat/A_25_10.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 22 | 2021-01-02T15:25:23.000Z | 2022-03-14T11:32:53.000Z | epytope/Data/pssms/epidemix/mat/A_25_10.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 4 | 2021-05-28T08:50:38.000Z | 2022-03-14T11:45:32.000Z | A_25_10 = {0: {'A': -2.7, 'C': -2.1, 'E': 1.9, 'D': 0.2, 'G': 0.0, 'F': -3.1, 'I': -3.0, 'H': 1.5, 'K': -2.3, 'M': -2.0, 'L': -3.5, 'N': -2.2, 'Q': -1.6, 'P': -2.7, 'S': 0.0, 'R': -2.4, 'T': 0.7, 'W': -1.7, 'V': 0.0, 'Y': -2.4}, 1: {'A': -2.6, 'C': -1.2, 'E': -3.0, 'D': -3.1, 'G': -3.5, 'F': -1.9, 'I': 1.1, 'H': -2.2, 'K': -2.9, 'M': 1.1, 'L': 0.2, 'N': -2.7, 'Q': -2.4, 'P': -2.7, 'S': -2.7, 'R': -2.7, 'T': 1.0, 'W': -1.3, 'V': 1.8, 'Y': -2.0}, 2: {'A': -0.4, 'C': -1.5, 'E': -3.0, 'D': 0.0, 'G': -3.5, 'F': 1.1, 'I': 1.7, 'H': -2.0, 'K': -3.1, 'M': 0.8, 'L': -2.5, 'N': -2.7, 'Q': -2.6, 'P': 0.3, 'S': 0.0, 'R': -3.0, 'T': -2.4, 'W': -1.2, 'V': 0.6, 'Y': 0.7}, 3: {'A': -0.2, 'C': -2.2, 'E': 0.7, 'D': 1.0, 'G': -3.1, 'F': 0.1, 'I': -3.5, 'H': 0.8, 'K': 0.0, 'M': -2.2, 'L': -3.8, 'N': 1.7, 'Q': -2.0, 'P': 0.5, 'S': -0.1, 'R': -2.5, 'T': -2.6, 'W': -1.7, 'V': -3.5, 'Y': 0.7}, 4: {'A': -0.1, 'C': -1.6, 'E': -0.2, 'D': -2.8, 'G': -0.1, 'F': 1.4, 'I': -2.2, 'H': 1.3, 'K': 0.5, 'M': -1.2, 'L': 0.3, 'N': -2.4, 'Q': -2.0, 'P': -2.8, 'S': -2.6, 'R': 0.9, 'T': 0.1, 'W': -1.2, 'V': 0.2, 'Y': -1.6}, 5: {'A': -0.6, 'C': -1.5, 'E': -0.2, 'D': -0.1, 'G': 0.2, 'F': -2.1, 'I': 0.5, 'H': 0.7, 'K': -0.1, 'M': 1.2, 'L': 0.7, 'N': -2.4, 'Q': -2.1, 'P': 0.8, 'S': -2.7, 'R': 0.0, 'T': 0.0, 'W': -1.4, 'V': -0.1, 'Y': -2.2}, 6: {'A': -0.4, 'C': 1.3, 'E': -3.0, 'D': -3.1, 'G': -3.4, 'F': 0.5, 'I': 0.9, 'H': 0.9, 'K': -0.6, 'M': -0.9, 'L': 0.8, 'N': -2.7, 'Q': -2.4, 'P': 0.2, 'S': -0.3, 'R': -2.7, 'T': 0.3, 'W': -1.2, 'V': 0.4, 'Y': 0.4}, 7: {'A': 0.3, 'C': -1.6, 'E': -0.3, 'D': -2.6, 'G': -3.0, 'F': 0.2, 'I': -2.5, 'H': 1.0, 'K': 0.1, 'M': 0.8, 'L': 0.0, 'N': 0.3, 'Q': 1.1, 'P': -2.7, 'S': 0.7, 'R': -2.4, 'T': 0.4, 'W': -1.5, 'V': -0.2, 'Y': -2.0}, 8: {'A': 1.0, 'C': -1.5, 'E': 0.2, 'D': -2.4, 'G': 0.7, 'F': 0.3, 'I': -3.0, 'H': 1.0, 'K': -2.5, 'M': -1.8, 'L': -3.4, 'N': 0.0, 'Q': -2.0, 'P': -2.5, 'S': 0.7, 'R': -2.6, 'T': 1.2, 'W': 
-1.4, 'V': -2.8, 'Y': -2.1}, 9: {'A': -5.0, 'C': -3.4, 'E': -5.2, 'D': -5.0, 'G': -5.3, 'F': 1.2, 'I': -4.0, 'H': -2.8, 'K': -5.2, 'M': -3.1, 'L': -4.3, 'N': -4.9, 'Q': -4.4, 'P': -5.0, 'S': -4.9, 'R': -5.0, 'T': -4.7, 'W': 3.9, 'V': -4.3, 'Y': 1.3}} | 2,187 | 2,187 | 0.282122 |
4c3c4da8588b734209289e17d11120ad3202becb | 5,827 | py | Python | main.py | matbocz/InterpolationComparison-DesktopApp | 4b76b00a4067506c9626c0bb93b36867d5cacb79 | [
"MIT"
] | null | null | null | main.py | matbocz/InterpolationComparison-DesktopApp | 4b76b00a4067506c9626c0bb93b36867d5cacb79 | [
"MIT"
] | 2 | 2022-01-13T03:47:11.000Z | 2022-03-12T00:58:03.000Z | main.py | matbocz/InterpolationComparison-DesktopApp | 4b76b00a4067506c9626c0bb93b36867d5cacb79 | [
"MIT"
] | null | null | null | """This is a module for interpolation comparison."""
import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QGridLayout, QLabel,
QLineEdit, QComboBox, QPushButton, QMessageBox)
from PyQt5.QtCore import Qt
import interpolation
class ApplicationGUI(QWidget):
    """Main window: collects interpolation parameters and runs the comparison.

    Fix over the original: invalid user input no longer terminates the whole
    process with ``exit(1)`` (which aborts a PyQt application when raised
    inside a slot) -- each validation failure now shows a warning dialog and
    returns so the user can correct the fields.
    """

    # Interpolation kinds offered by both combo boxes.
    INTERPOLATION_KINDS = [
        "linear", "nearest", "zero", "slinear", "quadratic", "cubic",
        "previous", "next"
    ]

    def __init__(self):
        super().__init__()
        self.layout = QGridLayout()

        # Static labels describing each input field.
        self.lbl1 = QLabel("Enter starting value:")
        self.lbl2 = QLabel("Enter end value:")
        self.lbl3 = QLabel("Enter number of samples:")
        self.lbl4 = QLabel(
            "Enter interpolated function (example: sin(x) + 2):")
        self.lbl5 = QLabel("Choose first kind of interpolation:")
        self.lbl6 = QLabel("Choose second kind of interpolation:")

        # Free-text inputs: range start/end, sample count, function expression.
        self.edit1 = QLineEdit()
        self.edit2 = QLineEdit()
        self.edit3 = QLineEdit()
        self.edit4 = QLineEdit()

        # Two interpolation-kind selectors.
        self.box1 = QComboBox()
        self.box1.addItems(self.INTERPOLATION_KINDS)
        self.box2 = QComboBox()
        self.box2.addItems(self.INTERPOLATION_KINDS)

        self.btn_start = QPushButton("START!")
        self.btn_reset = QPushButton("RESET")

        # Parsed form values, filled in by btn_start_clicked().
        self.start = 0.0
        self.stop = 0.0
        self.samples = 0
        self.function = ""
        self.first_kind = ""
        self.second_kind = ""

    def set_interface(self):
        """Lay out the widgets and wire up the button handlers."""
        self.setWindowTitle("Interpolation Comparison")
        self.resize(600, 1)

        # Two-column grid: labels on the left, matching inputs on the right.
        for row, lbl in enumerate((self.lbl1, self.lbl2, self.lbl3,
                                   self.lbl4, self.lbl5, self.lbl6)):
            self.layout.addWidget(lbl, row, 0)
        for row, widget in enumerate((self.edit1, self.edit2, self.edit3,
                                      self.edit4, self.box1, self.box2)):
            self.layout.addWidget(widget, row, 1)

        # Buttons on the bottom row.
        self.layout.addWidget(self.btn_start, 6, 1)
        self.layout.addWidget(self.btn_reset, 6, 0)

        self.btn_start.clicked.connect(self.btn_start_clicked)
        self.btn_reset.clicked.connect(self.btn_reset_clicked)

        self.setLayout(self.layout)

    def _warn(self, message):
        """Show a modal warning dialog with the given message."""
        QMessageBox.warning(self, "Error", message, QMessageBox.Ok)

    def btn_start_clicked(self):
        """Validate the form and run the interpolation comparison."""
        # All fields are required.
        if (self.edit1.text() == "" or self.edit2.text() == ""
                or self.edit3.text() == "" or self.edit4.text() == ""):
            self._warn("You must complete all fields!")
            return

        # Parse the numeric fields; reject non-numeric input.
        try:
            self.start = float(self.edit1.text())
            self.stop = float(self.edit2.text())
            self.samples = int(self.edit3.text())
        except ValueError:
            self._warn("You have entered an invalid data type!")
            return

        self.function = self.edit4.text()
        self.first_kind = self.box1.currentText()
        self.second_kind = self.box2.currentText()

        # The range must be non-empty and ascending.
        if self.start >= self.stop:
            self._warn("The stop value must be greater than the start value")
            return

        # Too few samples makes the higher-order interpolations meaningless.
        if self.samples <= 5:
            self._warn("The number of samples must be greater than 5!")
            return

        # Run the comparison: one graph plus one table per chosen kind.
        interp = interpolation.Interpolation(self.start, self.stop,
                                             self.samples, self.function,
                                             self.first_kind, self.second_kind)
        interp.interpolation_graph()
        interp.interpolation_table(self.first_kind)
        interp.interpolation_table(self.second_kind)

    def btn_reset_clicked(self):
        """Clear all inputs and restore the combo boxes to their defaults."""
        for edit in (self.edit1, self.edit2, self.edit3, self.edit4):
            edit.clear()
        for box in (self.box1, self.box2):
            box.clear()
            box.addItems(self.INTERPOLATION_KINDS)

    def keyPressEvent(self, e):  # pylint: disable=invalid-name
        """Close the application when the ESC key is pressed."""
        if e.key() == Qt.Key_Escape:
            self.close()
def main():
    """Create the Qt application, build the main window and enter the event loop."""
    app = QApplication(sys.argv)
    window = ApplicationGUI()
    window.set_interface()
    window.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
    # Launch the GUI when the module is executed directly.
    main()
| 32.553073 | 79 | 0.569933 |
2f0de21ee89eff12b95dcb54994376bc69f81289 | 203 | py | Python | platform/core/polyaxon/compiler/managers/job.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | platform/core/polyaxon/compiler/managers/job.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | platform/core/polyaxon/compiler/managers/job.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | from compiler.managers.base import BaseCompileManager
from schemas import JobSpecification, kinds
class JobCompileManager(BaseCompileManager):
    """Compile manager for plain jobs: binds the JOB kind to its
    specification schema; all compilation logic lives in BaseCompileManager."""
    KIND = kinds.JOB
    SPECIFICATION = JobSpecification
| 25.375 | 53 | 0.82266 |
ba80ac245a0e5a2d03794e5d68fb25fadd0e37b3 | 1,012 | py | Python | backend/server/server/wsgi.py | venkyPy-2019/my_ml_service | c75d8fc060866b43d9a563b7ce652d5ed11007a4 | [
"MIT"
] | null | null | null | backend/server/server/wsgi.py | venkyPy-2019/my_ml_service | c75d8fc060866b43d9a563b7ce652d5ed11007a4 | [
"MIT"
] | null | null | null | backend/server/server/wsgi.py | venkyPy-2019/my_ml_service | c75d8fc060866b43d9a563b7ce652d5ed11007a4 | [
"MIT"
] | 1 | 2021-02-23T10:31:50.000Z | 2021-02-23T10:31:50.000Z | """
WSGI config for server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'server.settings')
application = get_wsgi_application()
# ML registry
import inspect
from apps.ml.registry import MLRegistry
from apps.ml.income_classifier.random_forest import RandomForestClassifier
try:
    registry = MLRegistry()
    rf = RandomForestClassifier()
    # Register the classifier under the "income_classifier" endpoint at WSGI
    # startup so the API can dispatch prediction requests to it.
    registry.add_algorithm(endpoint_name="income_classifier", algorithm_object=rf, algorithm_name="random forest", algorithm_status="production", algorithm_version="0.0.1", owner="Venky", algorithm_description="Random Forest with simple pre- and post-processing", algorithm_code=inspect.getsource(RandomForestClassifier))
except Exception as e:
    # A registry failure should not prevent the WSGI app from serving;
    # the error is printed so it appears in the server logs.
    print("Exception while loading the algorithms to the registry,", str(e))
| 34.896552 | 318 | 0.805336 |
a117f37a8985bd11835be8c111823704d904ebc9 | 3,034 | py | Python | fnmatch/fnmatch.py | CleberPeter/micropython-lib | 5ffce0338452e8d1e2cd2790bb5f92b8bcadb49b | [
"PSF-2.0"
] | 1 | 2019-07-30T15:35:46.000Z | 2019-07-30T15:35:46.000Z | fnmatch/fnmatch.py | CleberPeter/micropython-lib | 5ffce0338452e8d1e2cd2790bb5f92b8bcadb49b | [
"PSF-2.0"
] | null | null | null | fnmatch/fnmatch.py | CleberPeter/micropython-lib | 5ffce0338452e8d1e2cd2790bb5f92b8bcadb49b | [
"PSF-2.0"
] | 1 | 2020-11-08T23:35:54.000Z | 2020-11-08T23:35:54.000Z | """Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import os
import os.path
import re
#import functools
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
def fnmatch(name, pat):
    """Test whether FILENAME matches PATTERN.

    Patterns are Unix shell style:

    *       matches everything
    ?       matches any single character
    [seq]   matches any character in seq
    [!seq]  matches any char not in seq

    An initial period in FILENAME is not special.
    Both FILENAME and PATTERN are first case-normalized
    if the operating system requires it.
    If you don't want this, use fnmatchcase(FILENAME, PATTERN).
    """
    # Normalize case on both sides, then delegate to the exact-case matcher.
    return fnmatchcase(os.path.normcase(name), os.path.normcase(pat))
#@functools.lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat):
    """Translate *pat* to a regex and return the compiled pattern's match()."""
    if isinstance(pat, bytes):
        # translate() works on str, so round-trip bytes through Latin-1.
        regex = bytes(translate(str(pat, 'ISO-8859-1')), 'ISO-8859-1')
    else:
        regex = translate(pat)
    return re.compile(regex).match
def filter(names, pat):
    """Return the subset of the list NAMES that match PAT."""
    # Compile the (case-normalized) pattern once, then test each name.
    match = _compile_pattern(os.path.normcase(pat))
    return [name for name in names if match(os.path.normcase(name))]
def fnmatchcase(name, pat):
    """Test whether FILENAME matches PATTERN, including case.

    This is a version of fnmatch() which doesn't case-normalize
    its arguments.
    """
    return _compile_pattern(pat)(name) is not None
def translate(pat):
    """Translate a shell PATTERN to a regular expression string.

    There is no way to quote meta-characters.  The returned string has the
    form ``(?ms)<regex>\\Z`` and is suitable for re.compile().

    Fix over the original: the final anchor was written as ``'\\Z'`` in a
    non-raw string, i.e. the invalid escape sequence ``\\Z``, which raises a
    SyntaxWarning (eventually an error) on modern CPython; raw strings are
    used for all regex fragments here.
    """
    i, n = 0, len(pat)
    res = ''
    while i < n:
        c = pat[i]
        i = i + 1
        if c == '*':
            res = res + '.*'
        elif c == '?':
            res = res + '.'
        elif c == '[':
            # Scan ahead for the closing ']' of the character class.
            j = i
            if j < n and pat[j] == '!':
                j = j + 1
            # A ']' immediately after '[' (or '[!') is a literal member.
            if j < n and pat[j] == ']':
                j = j + 1
            while j < n and pat[j] != ']':
                j = j + 1
            if j >= n:
                # Unterminated class: treat '[' as a literal character.
                res = res + r'\['
            else:
                stuff = pat[i:j].replace('\\', '\\\\')
                i = j + 1
                if stuff[0] == '!':
                    # Shell negation '!' becomes regex negation '^'.
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    # A literal leading '^' must be escaped inside the class.
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        else:
            res = res + re.escape(c)
    # Original patterns is undefined, see http://bugs.python.org/issue21464
    return '(?ms)' + res + r'\Z'
| 28.895238 | 75 | 0.566249 |
3118ee2654b7ef0e1c0697c5234959a7894e5c0e | 2,954 | py | Python | nmcli/_connection.py | meramsey/nmcli | d2ce0cfbad13cb61bb9f4e1c674a8a9c2ad2f7ed | [
"MIT"
] | 1 | 2022-03-19T05:59:53.000Z | 2022-03-19T05:59:53.000Z | nmcli/_connection.py | meramsey/nmcli | d2ce0cfbad13cb61bb9f4e1c674a8a9c2ad2f7ed | [
"MIT"
] | 45 | 2021-01-04T12:05:07.000Z | 2021-04-21T14:35:55.000Z | nmcli/_connection.py | meramsey/nmcli | d2ce0cfbad13cb61bb9f4e1c674a8a9c2ad2f7ed | [
"MIT"
] | 2 | 2021-03-26T12:35:07.000Z | 2021-05-01T02:15:16.000Z | import re
from typing import List, Optional
from ._system import SystemCommand, SystemCommandInterface
from .data.connection import Connection, ConnectionDetails, ConnectionOptions
class ConnectionControlInterface:
    """Abstract interface for NetworkManager connection operations.

    Each method corresponds to an ``nmcli connection`` sub-command; the
    concrete implementation (ConnectionControl below) issues the CLI calls.
    """

    def __call__(self) -> List[Connection]:
        """Return all known connections."""
        raise NotImplementedError

    def add(self,
            conn_type: str,
            options: Optional[ConnectionOptions] = None,
            ifname: str = "*",
            name: str = None,
            autoconnect: bool = False) -> None:
        """Create a new connection."""
        raise NotImplementedError

    def modify(self, name: str, options: ConnectionOptions) -> None:
        """Change properties of an existing connection."""
        raise NotImplementedError

    def delete(self, name: str) -> None:
        """Remove a connection."""
        raise NotImplementedError

    def up(self, name: str) -> None:
        """Activate a connection."""
        raise NotImplementedError

    def down(self, name: str) -> None:
        """Deactivate a connection."""
        raise NotImplementedError

    def show(self, name: str) -> ConnectionDetails:
        """Return the detail fields of a connection."""
        raise NotImplementedError

    def reload(self) -> None:
        """Reload connection profiles."""
        raise NotImplementedError
class ConnectionControl(ConnectionControlInterface):
    """ConnectionControlInterface implementation that shells out to ``nmcli``.

    Fix over the original: ``add()`` used the unidiomatic ``not name is None``
    comparison (PEP 8 / E714); it now reads ``name is not None``.
    """

    def __init__(self, syscmd: SystemCommandInterface = None):
        # Allow injecting a fake command runner for testing.
        self._syscmd = syscmd or SystemCommand()

    def __call__(self) -> List[Connection]:
        """Return all connections reported by ``nmcli connection``."""
        output = self._syscmd.nmcli('connection')
        # First line is the column header; empty lines carry no connection.
        return [Connection.parse(row) for row in output.split('\n')[1:] if row]

    def add(self,
            conn_type: str,
            options: Optional[ConnectionOptions] = None,
            ifname: str = "*",
            name: str = None,
            autoconnect: bool = False) -> None:
        """Create a connection via ``nmcli connection add``.

        NOTE: ``autoconnect`` is accepted for interface compatibility but is
        not forwarded to nmcli (same as the original behaviour).
        """
        params = ['connection', 'add', 'type', conn_type, 'ifname', ifname]
        if name is not None:
            params += ['con-name', name]
        for key, value in (options or {}).items():
            params += [key, value]
        self._syscmd.nmcli(params)

    def modify(self, name: str, options: ConnectionOptions) -> None:
        """Apply key/value option changes via ``nmcli connection modify``."""
        params = ['connection', 'modify', name]
        for key, value in options.items():
            params += [key, value]
        self._syscmd.nmcli(params)

    def delete(self, name: str) -> None:
        """Delete the named connection."""
        self._syscmd.nmcli(['connection', 'delete', name])

    def up(self, name: str) -> None:
        """Activate the named connection."""
        self._syscmd.nmcli(['connection', 'up', name])

    def down(self, name: str) -> None:
        """Deactivate the named connection."""
        self._syscmd.nmcli(['connection', 'down', name])

    def show(self, name: str) -> ConnectionDetails:
        """Parse ``nmcli connection show <name>`` output into a field dict.

        Values printed as '--' or '""' denote unset fields and become None.
        """
        output = self._syscmd.nmcli(['connection', 'show', name])
        results = {}
        for row in output.split('\n'):
            m = re.search(r'^(\S+):\s*([\S\s]+)\s*', row)
            if m:
                key, value = m.groups()
                results[key] = None if value in ('--', '""') else value
        return results

    def reload(self) -> None:
        """Re-read connection profiles (``nmcli connection reload``)."""
        self._syscmd.nmcli(['connection', 'reload'])
| 31.094737 | 77 | 0.580907 |
80921e614cf9bf50f793c69ade6fb64a7122aa1a | 3,112 | py | Python | python/2015/aoc_07_a.py | nickspoons/adventofcode | 4aef222ccc9dcea09b4010d65d5f9cd32683323a | [
"MIT"
] | null | null | null | python/2015/aoc_07_a.py | nickspoons/adventofcode | 4aef222ccc9dcea09b4010d65d5f9cd32683323a | [
"MIT"
] | null | null | null | python/2015/aoc_07_a.py | nickspoons/adventofcode | 4aef222ccc9dcea09b4010d65d5f9cd32683323a | [
"MIT"
] | null | null | null | """ Advent of Code, 2015: Day 07, a """
import re
with open(__file__[:-5] + "_input") as f:  # "..._a.py"[:-5] + "_input" -> shared "..._input" file
    inputs = [line.strip() for line in f]
# Gate-type tags stored as the first element of each parsed wire entry.
CONSTANT = 0
AND = 1
OR = 2
NOT = 3
LSHIFT = 4
RSHIFT = 5
def read():
    """Parse the instructions into ``{output_wire: [gate, operands]}``.

    Numeric-literal operands are converted to int immediately so calculate()
    can resolve them; wire-name operands stay strings until propagate()
    substitutes their values.  (The original converted digits only for
    CONSTANT and the first AND operand, so a gate like ``1 OR x`` could stay
    unresolved forever and hang run().)
    """
    def operand(token):
        # Numeric literals become ints; wire names remain strings.
        return int(token) if token.isdigit() else token

    # (pattern, gate tag) pairs, tried in the same order as the original.
    patterns = [
        (re.compile(r"^(\S+)$"), CONSTANT),
        (re.compile(r"(\S+) AND (\S+)"), AND),
        (re.compile(r"(\S+) OR (\S+)"), OR),
        (re.compile(r"NOT (\S+)"), NOT),
        (re.compile(r"(\S+) LSHIFT (\S+)"), LSHIFT),
        (re.compile(r"(\S+) RSHIFT (\S+)"), RSHIFT),
    ]

    wires = dict()
    for instruction in inputs:
        [line_in, line_out] = instruction.split(" -> ")
        for pattern, gate in patterns:
            match = pattern.match(line_in)
            if match:
                wires[line_out] = [gate, [operand(g) for g in match.groups()]]
                break
    return wires
def calculate(wire):
    """Return a tuple ``(is_resolved, value)`` for a single wire entry.

    A wire resolves only once every operand has been reduced to an int;
    until then the second element of the tuple is meaningless.
    """
    gate, operands = wire
    ready = all(isinstance(op, int) for op in operands)
    if gate == CONSTANT:
        return ready, operands[0]
    if not ready:
        return False, ""
    if gate == AND:
        return True, operands[0] & operands[1]
    if gate == OR:
        return True, operands[0] | operands[1]
    if gate == NOT:
        # Bitwise complement within the 16-bit value range.
        return True, 0b1111111111111111 - operands[0]
    if gate == LSHIFT:
        return True, operands[0] << operands[1]
    if gate == RSHIFT:
        return True, operands[0] >> operands[1]
    return False, ""
def propagate(wires, line_out, completed):
    """Replace every operand equal to *line_out* with its resolved value."""
    for gate_entry in wires.values():
        operands = gate_entry[1]
        for idx, operand in enumerate(operands):
            if operand == line_out:
                operands[idx] = completed
def run():
    """Resolve wires until wire "a" completes; return its signal value."""
    wires = read()
    while wires:
        # Snapshot the keys: entries are removed while iterating.
        for line_out in list(wires.keys()):
            done, value = calculate(wires[line_out])
            if done:
                if line_out == "a":
                    return value
                # Feed the resolved value into every dependent gate.
                propagate(wires, line_out, value)
                wires.pop(line_out)
    return 0
if __name__ == "__main__":
    # Solve the puzzle and print the signal on wire "a".
    print(run())
| 30.811881 | 77 | 0.525064 |
ac030610a492b8ab3ded3ac12616d28ffe94b1d4 | 1,181 | py | Python | tests/test_flightphase.py | ge-flight-analytics/emsPy | 763fe0f26ce695fe4814f222da1e7b17079f8e12 | [
"MIT"
] | 9 | 2017-07-18T18:02:11.000Z | 2020-10-03T04:51:02.000Z | tests/test_flightphase.py | ge-flight-analytics/emsPy | 763fe0f26ce695fe4814f222da1e7b17079f8e12 | [
"MIT"
] | 39 | 2018-09-20T14:57:10.000Z | 2021-07-01T19:50:29.000Z | tests/test_flightphase.py | ge-flight-analytics/emsPy | 763fe0f26ce695fe4814f222da1e7b17079f8e12 | [
"MIT"
] | 8 | 2017-11-03T05:06:08.000Z | 2021-02-23T08:25:18.000Z | import pytest
from emspy.query import FlightPhase
from mock_connection import MockConnection
from mock_ems import MockEMS
import pandas as pd
@pytest.fixture(scope='session')
def query():
    """Session-wide FlightPhase query backed by a mocked EMS connection."""
    c = MockConnection(user='', pwd='')
    FlightPhaseQuery = FlightPhase(c)
    return FlightPhaseQuery
def test_data_colnames(query):
    """The flight-phase data exposes exactly the 'id' and 'name' columns."""
    assert set(query.data_colnames()) == {'id', 'name'}
def test_list_all(query):
    """list_all() should return its results as a pandas DataFrame."""
    assert isinstance(query.list_all(), pd.DataFrame)
def test_search_contain(query):
    """A 'contain' search yields a DataFrame whose names all contain the term."""
    hits = query.search('name', val='Taxi', searchtype='contain')
    assert isinstance(hits, pd.DataFrame)
    assert hits['name'].str.contains('Taxi').all()
def test_search_match(query):
    """An exact 'match' search returns exactly one row with that name."""
    search = query.search('name', val='unknown state', searchtype='match')
    assert isinstance(search, pd.DataFrame)
    assert search.shape[0] == 1
    assert search['name'].iloc[0] == 'unknown state'
def test_get_id(query):
    """get_id() maps a flight-phase name to its numeric id (per mock data)."""
    flightphase_id = query.get_id('Taxi Out')
    assert flightphase_id == 1
def test_get_name(query):
    """get_name() maps a numeric id back to its phase name (per mock data)."""
    flightphase_name = query.get_name(2)
    assert flightphase_name == 'C) Takeoff'
| 24.604167 | 74 | 0.707028 |
760756a4398bbd8beb675a2e9711640bc88bc2cd | 3,699 | py | Python | examples/provisioning/ble_prov/ble_prov_test.py | lf2050/esp-idf | a45e9985344575a80acb1dc9c12e7bec4d8af401 | [
"Apache-2.0"
] | 2 | 2020-09-19T15:30:33.000Z | 2020-09-19T15:30:46.000Z | examples/provisioning/ble_prov/ble_prov_test.py | lf2050/esp-idf | a45e9985344575a80acb1dc9c12e7bec4d8af401 | [
"Apache-2.0"
] | 1 | 2020-09-14T18:20:07.000Z | 2020-09-14T18:23:11.000Z | examples/provisioning/ble_prov/ble_prov_test.py | lf2050/esp-idf | a45e9985344575a80acb1dc9c12e7bec4d8af401 | [
"Apache-2.0"
] | 1 | 2021-06-19T15:28:32.000Z | 2021-06-19T15:28:32.000Z | #!/usr/bin/env python
#
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import re
import os
import sys
import time
try:
import IDF
from IDF.IDFDUT import ESP32DUT
except ImportError:
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
import IDF
try:
import esp_prov
except ImportError:
esp_prov_path = os.getenv("IDF_PATH") + "/tools/esp_prov"
if esp_prov_path and esp_prov_path not in sys.path:
sys.path.insert(0, esp_prov_path)
import esp_prov
# Have esp_prov throw exception
esp_prov.config_throw_except = True
@IDF.idf_example_test(env_tag="Example_WIFI_BT")
def test_examples_provisioning_ble(env, extra_data):
    """Flash the ble_prov example onto a DUT and provision Wi-Fi over BLE.

    Fix over the original: the final polling loop only handled states 1
    (connecting -> continue) and 0 (connected -> success); any other state
    made the loop spin forever with a 5-second sleep.  Unexpected states now
    exit the loop so the test fails promptly.
    """
    # Acquire DUT
    dut1 = env.get_dut("ble_prov", "examples/provisioning/ble_prov", dut_class=ESP32DUT)

    # Record the binary size as a tracked performance metric.
    binary_file = os.path.join(dut1.app.binary_path, "ble_prov.bin")
    bin_size = os.path.getsize(binary_file)
    IDF.log_performance("ble_prov_bin_size", "{}KB".format(bin_size // 1024))
    IDF.check_performance("ble_prov_bin_size", bin_size // 1024)

    # Upload binary and start testing
    dut1.start_app()

    # Parse the BLE device name advertised by the firmware.
    devname = dut1.expect(re.compile(r"Provisioning started with BLE devname : '(PROV_\S\S\S\S\S\S)'"), timeout=60)[0]
    print("BLE Device Alias for DUT :", devname)

    dut1.expect("BLE Provisioning started", timeout=30)
    print("Starting Provisioning")

    # Provisioning parameters matching the example's defaults.
    verbose = False
    protover = "V0.1"
    secver = 1
    pop = "abcd1234"
    provmode = "ble"
    ap_ssid = "myssid"
    ap_password = "mypassword"

    print("Getting security")
    security = esp_prov.get_security(secver, pop, verbose)
    if security is None:
        raise RuntimeError("Failed to get security")

    print("Getting transport")
    transport = esp_prov.get_transport(provmode, devname)
    if transport is None:
        raise RuntimeError("Failed to get transport")

    print("Verifying protocol version")
    if not esp_prov.version_match(transport, protover):
        raise RuntimeError("Mismatch in protocol version")

    print("Starting Session")
    if not esp_prov.establish_session(transport, security):
        raise RuntimeError("Failed to start session")

    print("Sending Wifi credential to DUT")
    if not esp_prov.send_wifi_config(transport, security, ap_ssid, ap_password):
        raise RuntimeError("Failed to send Wi-Fi config")

    print("Applying config")
    if not esp_prov.apply_wifi_config(transport, security):
        raise RuntimeError("Failed to send apply config")

    # Poll the Wi-Fi state until it either connects or reaches a terminal
    # state.  1 means still connecting, 0 means connected; other values are
    # presumably failure/disconnect codes (TODO confirm against esp_prov).
    success = False
    while True:
        time.sleep(5)
        print("Wi-Fi connection state")
        ret = esp_prov.get_wifi_config(transport, security)
        if ret == 1:
            continue
        if ret == 0:
            print("Provisioning was successful")
            success = True
        else:
            # The original looped forever here; bail out and fail instead.
            print("Provisioning reached unexpected state", ret)
        break

    if not success:
        raise RuntimeError("Provisioning failed")
if __name__ == '__main__':
    # Allow running this test standalone, outside the CI test runner.
    test_examples_provisioning_ble()
| 31.347458 | 118 | 0.702893 |
710bb57136b986ac9951dfc79c878181e6d11eb3 | 606 | py | Python | homedisplay/info_transportation/migrations/0005_auto_20150225_2359.py | ojarva/home-info-display | 873d022308732baff94d0dc2381cf9dc7dce23b7 | [
"BSD-3-Clause"
] | 1 | 2016-11-28T04:35:06.000Z | 2016-11-28T04:35:06.000Z | homedisplay/info_transportation/migrations/0005_auto_20150225_2359.py | ojarva/home-info-display | 873d022308732baff94d0dc2381cf9dc7dce23b7 | [
"BSD-3-Clause"
] | 160 | 2015-01-01T20:59:29.000Z | 2016-04-25T13:36:52.000Z | homedisplay/info_transportation/migrations/0005_auto_20150225_2359.py | ojarva/home-info-display | 873d022308732baff94d0dc2381cf9dc7dce23b7 | [
"BSD-3-Clause"
] | 1 | 2015-02-25T21:24:01.000Z | 2015-02-25T21:24:01.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add line_number_raw to Line and make (stop, line_number_raw) unique."""
    dependencies = [
        ('info_transportation', '0004_auto_20150225_2202'),
    ]

    operations = [
        migrations.AddField(
            model_name='line',
            name='line_number_raw',
            field=models.CharField(default=1, max_length=20),
            # default=1 only backfills existing rows during this migration;
            # it is not kept as a model-level default.
            preserve_default=False,
        ),
        migrations.AlterUniqueTogether(
            name='line',
            unique_together=set([('stop', 'line_number_raw')]),
        ),
    ]
| 24.24 | 63 | 0.59901 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.