text string | size int64 | token_count int64 |
|---|---|---|
from server import crud
def test_filter(product_1, product_2):
    """Exercise get_multi filtering: partial match, unknown column, wildcard."""
    # Partial value match against an existing column.
    rows, header = crud.product_crud.get_multi(
        filter_parameters=["name:duct 1"], sort_parameters=[])
    assert len(rows) == 1
    assert header == "products 0-100/1"
    # An unknown column name does not filter; every row comes back.
    rows, header = crud.product_crud.get_multi(
        filter_parameters=["NONEXISITANT:0"], sort_parameters=[])
    assert len(rows) == 2
    assert header == "products 0-100/2"
    # A bare term (no "column:") is a wildcard match across all tables.
    rows, header = crud.product_crud.get_multi(
        filter_parameters=["Product 1"], sort_parameters=[])
    assert len(rows) == 1
    assert header == "products 0-100/1"
def test_sort(product_1, product_2):
    """Exercise get_multi sorting: ASC, DESC, default order, bad columns."""
    rows, header = crud.product_crud.get_multi(
        filter_parameters=[], sort_parameters=["name:ASC"])
    assert len(rows) == 2
    assert header == "products 0-100/2"
    assert rows[0].name == "Product 1"
    rows, header = crud.product_crud.get_multi(
        filter_parameters=[], sort_parameters=["name:DESC"])
    assert len(rows) == 2
    assert header == "products 0-100/2"
    assert rows[0].name == "Product 2"
    # No explicit sort order given.
    rows, header = crud.product_crud.get_multi(
        filter_parameters=[], sort_parameters=["name"])
    assert len(rows) == 2
    assert header == "products 0-100/2"
    assert rows[0].name == "Product 1"
    # A non-existent column cannot be sorted on.
    rows, header = crud.product_crud.get_multi(
        filter_parameters=[], sort_parameters=["NONTRUE"])
    assert len(rows) == 2
    assert header == "products 0-100/2"
    # A non-existent column with a non-existent direction cannot be sorted on either.
    rows, header = crud.product_crud.get_multi(
        filter_parameters=[], sort_parameters=["NONTRUE:NONTRUE"])
    assert len(rows) == 2
    assert header == "products 0-100/2"
| 1,926 | 646 |
import numpy as np
import trimesh
# Support running both as a flat script and as part of the src.data package.
try:
    from Satellite_Panel_Solar import Panel_Solar
    from SatelitteActitud import SatelitteActitud
except ImportError:  # bare `except:` would also swallow KeyboardInterrupt/SystemExit
    from src.data.Satellite_Panel_Solar import Panel_Solar
    from src.data.SatelitteActitud import SatelitteActitud
# noinspection SpellCheckingInspection
"""Satelitte Solar Power System
Es una clase donde se incluye todo el sistema de potencia del satelitte, permite incluir un modelo en CAD
y realizar su analisis de potencia dependiento de un vector utilizado como la direccion del sol hacia el satelite
Example:
Para llamar a esta clase solo hace falta, una linea como la de abajo:
$ Sat = SatelitteSolarPowerSystem(direccion='models/12U.stl')
Esta clase tiene varios atributos incluidos como la caracteristica de cada panel solar
para ello solo se necesita llamar a la clase con:
$ from Satellite_Panel_Solar import Panel_Solar
$ Sat.caracteristicas_panel_solar=[Panel_solar()]
Para ver como se configura cada Panel_solar hay que remitirse a su documentacion
Finalmente notar que el atributo mesh incluye todos aquellos del paquete trimesh
"""
class SatelitteSolarPowerSystem(object):
    def __init__(self, direccion, SatelitteActitud, panel_despegable_dual=True, Despegables_orientables=False):
        """Load the CAD model and precompute the geometry used by the power analysis
        (deployable panels, possible shadow, sun plane).

        Args:
            direccion: path (string) to the model file including its type, e.g. .STL, .OBJ, .PLY
            SatelitteActitud: attitude object; NOTE this parameter name shadows the imported class.
            panel_despegable_dual: whether deployable panels generate power on both faces. Default(True)
            Despegables_orientables: whether deployable panels can be steered toward the sun. Default(False)
        """
        self.mesh = self.cargar_modelo(direccion)
        self.numero_caras = len(self.mesh.facets)
        # Facet normals at time 0, before any rotation is applied.
        self.Normales_caras = self.mesh.facets_normal
        self.Area_caras = self.mesh.facets_area
        # One panel description per facet; all facets start as 'Estandar'.
        # NOTE(review): list-multiplication shares ONE Panel_Solar instance
        # across all facets — mutating one entry affects all; confirm intended.
        self.caracteristicas_panel_solar = [
            Panel_Solar('Estandar')] * self.numero_caras
        self.Caras_Despegables = self.caras_despegables()
        self.sombra = self.posible_sombra()
        # Recenter the mesh on its centroid AFTER the facet analysis above.
        self.mesh.vertices -= self.mesh.centroid
        self.sun_plane = self.puntos_sol()
        self.panel_despegable_dual = panel_despegable_dual
        self.name = self.nombrar_caras()
        self.actitud = SatelitteActitud
        self.Despegables_orientables = Despegables_orientables
def cargar_modelo(self, direccion):
"""
cargar_modelo
Args:
direccion: string con la direcion del y con el tipo del archivo Ex. .STL, .OBJ, .PLY
Returns:
trimesh.mesh
"""
return trimesh.load_mesh(direccion)
def nombrar_caras(self):
"""
nombrar_caras
Nombra las caras del modelo para poder utilizarlas se realiza al principio porque si se gira cambiara
Simplemente nombra las caras con X, Y, Z
Returns:
name: devuelve el nombre de las caras de manera X, Y, Z
"""
name = []
j = 0
o = 0
for i in self.mesh.facets_normal:
i = np.round(i)
if (i == [1, 0, 0]).all():
name.append('X+')
elif (i == [-1, 0, 0]).all():
name.append('X-')
elif (i == [0, 1, 0]).all():
name.append('Y+')
elif (i == [0, -1, 0]).all():
name.append('Y-')
elif (i == [0, 0, -1]).all():
name.append('Z-')
elif (i == [0, 0, 1]).all():
name.append('Z+')
else:
name.append(f'Panel direction {i}')
if j in self.Caras_Despegables:
name[j] = name[j] + f' Panel Despegable {o}'
o += 1
j += 1
return name
def caras_despegables(self):
"""
caras_despegables
Localiza los paneles despegables, es un metodo bastante dificil
Returns:
caras_despeables: es el numero de las caras
"""
caras_despegables = []
# si las caras se encuentran con otras sin volumen como los paneles,
# esta las toman como rotas por trimesh por lo que se pueden localizar
for i in np.arange(0, len(trimesh.repair.broken_faces(self.mesh))):
caras_despegables.append(
np.array(np.where(self.mesh.facets == trimesh.repair.broken_faces(self.mesh)[i])).flatten()[0])
# se encuentran las caras que son despegables
# elimina las repetidas
caras_despegables = list(set(caras_despegables))
return caras_despegables
def posible_sombra(self):
"""
posible_sombra
buscar la cara mas cercana a los paneles que puede dar sombra
Returns:
sombra:numero de las caras que pueden tener sombra
"""
sombra = np.array(
np.where(self.mesh.facets_on_hull == False)).flatten()
return sombra
def puntos_sol(self):
"""
puntos_sol
Crea un conjunto de puntos de aquellos que darian sombra
con los centros de los paneles
Returns:
trimesh.mesh : Plano de puntos
"""
p = self.mesh.facets[self.sombra].flatten()
sun_plane = self.mesh.triangles_center[p]
return sun_plane
    def celdas_activas(self, sun_vector):
        """
        celdas_activas
        Find the active (sun-lit) cells: rays are cast from far away along
        -sun_vector toward the mesh from the puntos_sol points, and the
        triangles they hit are reported.
        Args:
            sun_vector (array(,3)) : sun vector (assumed pointing toward the sun — TODO confirm)
        Returns:
            index_tri [array(n)]: indices of the triangles hit by the sun rays
        """
        # Push the ray origins far out along the sun direction, then shoot back.
        sun_planeAux = self.puntos_sol()+5000*sun_vector
        ray_origins = sun_planeAux
        ray_directions = np.array([-sun_vector] * len(sun_planeAux))
        if trimesh.ray.has_embree:  # pyembree backend (Linux only) is ~50x faster
            index_tri = self.mesh.ray.intersects_first(
                ray_origins=ray_origins, ray_directions=ray_directions)
        else:
            locations, index_ray, index_tri = self.mesh.ray.intersects_location(ray_origins=ray_origins,
                                                                                ray_directions=ray_directions,
                                                                                multiple_hits=False)
        # Deduplicate: several rays can hit the same triangle.
        # NOTE(review): the source indentation was lost in extraction; this line
        # may have belonged inside the else-branch only — confirm upstream.
        index_tri = list(set(index_tri))
        return index_tri
def Add_prop_Panel(self, e):
"""
Add_prop_Panel
Añade propiedades al panel
Args:
e (Panel_Solar): Panel Solar
"""
self.caracteristicas_panel_solar.append(e)
    def power_panel_solar(self, index_tri, Sun_vector, WSun):
        """
        power_panel_solar
        Compute the power produced by the satellite with a fixed attitude.
        Args:
            index_tri (array(,:)): triangles currently lit by the sun rays
            Sun_vector (array(,3)): sun vector in LVLH (assumed unit length — TODO confirm)
            WSun (float): solar irradiance
        Returns:
            W (array(,n)) : power generated per facet
            area_potencia (array(,n)) : area generating power per facet
                (mesh areas divided by 1000**2 — presumably mm^2 to m^2; verify model units)
            ang (array(,n)) : cosine of the sun incidence angle per facet
            n : number of facets
        """
        # Dot product: cosine of incidence between the sun and each facet normal.
        ang = list(map(Sun_vector.dot, self.mesh.facets_normal))
        # Initialize the accumulators.
        area_potencia = []
        W = []
        for i in np.arange(0, len(self.mesh.facets)):
            # Dual-sided deployable panels also generate when lit from behind.
            if (i in self.Caras_Despegables) & (self.panel_despegable_dual == True) & (ang[i] < 0):
                ang_inc = -ang[i]
            else:
                ang_inc = ang[i]
            # For shadow-capable facets, scale the area by the fraction of
            # their triangles actually hit by the sun rays.
            if i in self.sombra:
                o = np.isin(index_tri, self.mesh.facets[i])
                o = o[o == True]
                area = (
                    len(o) / len(self.mesh.facets[i])) * self.mesh.facets_area[i] / (1000 ** 2)
                area_potencia.append(area)
            else:
                area = self.mesh.facets_area[i] / (1000 ** 2)  # facets_area is in mm^2
                area_potencia.append(area)
            # Incidence beyond 75 degrees produces no usable energy.
            if (ang_inc >= 0) & (ang_inc > (np.cos((np.pi / 180) * 75))):
                W.append(
                    area * self.caracteristicas_panel_solar[i].psolar_rendimiento * WSun * ang_inc)
            else:
                W.append(0.)
        return W, area_potencia, ang
    def power_panel_con_actitud(self, Sun_vector, WSun):
        """
        power_panel_con_actitud
        Compute the power produced when the satellite's attitude points at the sun.
        Args:
            Sun_vector (array(,3)): sun vector in LVLH (assumed unit length — TODO confirm)
            WSun (float): solar irradiance
        Returns:
            W (array(,n)) : power generated per facet
            area_potencia (array(,n)) : areas generating power
            ang (array(,n)) : cosine of the sun incidence angle per facet
            angulo_giro (array(,n)) : rotation angle(s) applied to the satellite/panels
            n : number of facets
        """
        # Case 1: the panels are fixed to the satellite body.
        if self.Despegables_orientables == False:
            if self.actitud.apuntado_sol == True:
                # Build two planes: (sun vector x spin axis) and
                # (main panel direction x spin axis); the rotation needed is
                # the angle between those two plane normals.
                direcion_principal = self.mesh.facets_normal[self.Caras_Despegables[0]]
                plano0 = np.cross(Sun_vector, self.actitud.eje_de_spin)
                plano0 = plano0/np.linalg.norm(plano0)
                plano1 = np.cross(direcion_principal, self.actitud.eje_de_spin)
                plano1 = plano1/np.linalg.norm(plano1)
                # NOTE(review): the division by the norms sits OUTSIDE np.arccos;
                # since both planes were just normalized it is a no-op, but the
                # intent was likely to normalize the dot product — confirm.
                angulo_giro = np.arccos(np.absolute(
                    np.dot(plano0, plano1)))/(np.linalg.norm(plano0)*np.linalg.norm(plano1))
                if np.isnan(angulo_giro):
                    angulo_giro = 0.0
                if angulo_giro == 0:
                    pass
                else:
                    # Check whether rotating plano1 by +angulo_giro reproduces
                    # plano0; if not, rotate the other way.
                    prim = trimesh.transform_points(plano1.reshape(1, 3), trimesh.transformations.rotation_matrix(
                        angulo_giro, self.actitud.eje_de_spin, [0, 0, 0]))
                    if not np.allclose(prim, plano0):
                        angulo_giro = -angulo_giro
                    self.mesh = self.mesh.apply_transform(trimesh.transformations.rotation_matrix(
                        angulo_giro, self.actitud.eje_de_spin, [0, 0, 0]))
            else:
                angulo_giro = 0.0
            index_tri = self.celdas_activas(Sun_vector)
            W, area_potencia, ang = self.power_panel_solar(
                index_tri, Sun_vector, WSun)
            return W, area_potencia, ang, angulo_giro
        else:
            # Case 2: deployable panels can be steered toward the sun.
            if self.actitud.apuntado_sol == True:
                # Project the sun vector onto the plane orthogonal to the spin
                # axis, then align the main panel direction with that projection.
                # NOTE(review): original comment says this misbehaves when
                # crossing the equator — confirm before relying on it there.
                direcion_principal = self.mesh.facets_normal[self.Caras_Despegables[0]]
                direcion_principal = np.round(
                    direcion_principal/np.linalg.norm(direcion_principal), 5)
                matrix_projection = trimesh.transformations.projection_matrix(
                    [0, 0, 0], self.actitud.eje_de_spin)[0:3, 0:3]
                proyeccion = np.dot(matrix_projection, Sun_vector)
                proyeccion = proyeccion/np.linalg.norm(proyeccion)
                ver = np.arccos(np.dot(proyeccion, direcion_principal))
                if np.isnan(ver):
                    ver = 0.0
                if ver < 0.1e-4:
                    # Already aligned within tolerance: no rotation needed.
                    angulo_giro = 0.0
                    pass
                else:
                    transforma = trimesh.geometry.align_vectors(
                        direcion_principal, proyeccion)
                    angulo_giro = trimesh.transformations.rotation_from_matrix(transforma)[
                        0]
                    dir = trimesh.transform_points(
                        direcion_principal.reshape(1, 3), transforma)
                    # Prefer aligning with -proyeccion when that needs a smaller turn.
                    if np.absolute(angulo_giro) > 0.05:
                        transforma2 = np.round(trimesh.geometry.align_vectors(
                            direcion_principal, -proyeccion), 5)
                        angulo_giro2 = trimesh.transformations.rotation_from_matrix(transforma2)[
                            0]
                        dir = trimesh.transform_points(
                            direcion_principal.reshape(1, 3), transforma)
                        if np.absolute(angulo_giro2) < np.absolute(angulo_giro):
                            transforma = transforma2
                            angulo_giro = angulo_giro2
                        else:
                            pass
                    if np.isnan(angulo_giro):
                        angulo_giro = 0.0
                        pass
                    else:
                        self.mesh.apply_transform(transforma)
            else:
                angulo_giro = 0.0
            # With the panels oriented, integrate the power facet by facet.
            ang = list(map(Sun_vector.dot, self.mesh.facets_normal))
            area_potencia = []
            W = []
            angulo_giro = [angulo_giro]
            for i in np.arange(0, len(self.mesh.facets)):
                area = self.mesh.facets_area[i] / (1000 ** 2)
                area_potencia.append(area)
                if (i in self.Caras_Despegables):
                    # Steered panels face the sun directly: record their tilt
                    # and treat their incidence cosine as 1.
                    angulo_giro.append(np.arccos(ang[i]))
                    ang[i] = 1
                if (ang[i] >= 0) & (ang[i] > (np.cos((np.pi / 180) * 75))):
                    W.append(
                        area * self.caracteristicas_panel_solar[i].psolar_rendimiento * WSun * ang[i])
                else:
                    W.append(0.)
            return W, area_potencia, ang, angulo_giro
def Calculo_potencia(self, Sun_vector, WSun=1310):
"""
Calculo_potencia
Funcion general para llamar a las distintas funciones para calcular la potencia
Args:
Sun_vector ([type]): [description]
WSun (int, optional): [description]. Defaults to 1310.
Returns:
W (array(,n)) : Potencia generada
area_potencia (array(,n)) : Areas que generan potencia
ang (array(,n)) : Angulo de incidencia del vector sol con las caras
angulo_giro (array(,n)) : Angulo de giro del satelite
n : numero de caras
"""
if self.actitud.control_en_actitud == False:
index_tri = self.celdas_activas(Sun_vector)
W, area_potencia, ang = self.power_panel_solar(
index_tri, Sun_vector, WSun)
angulo_giro = []
# Ya que no hay giro pero nos lo piden habra que crearlo
[angulo_giro.append(np.NaN) for i in len(self.Caras_Despegables)]
else:
W, area_potencia, ang, angulo_giro = self.power_panel_con_actitud(
Sun_vector, WSun)
return W, area_potencia, ang, angulo_giro
def apply_transform(self, matrix):
"""
apply_transform
creada para hacer coincidir correctamente las caras
aplica una transformacion al satelite y reinicia los nombres
Args:
matrix (array(4,4)): matriz de transformacion
"""
self.mesh = self.mesh.apply_transform(matrix)
self.name = []
self.name = self.nombrar_caras()
self.Normales_caras = np.round(self.mesh.facets_normal)
def visual(self):
"""
visual
Crea una imagen visual del satelite con unos ejes funciona muy bien en notebook
y en linux tambien deberia de poder funcionar
Returns:
(scene): retoma una escena con los ejes
"""
ax = trimesh.creation.axis(axis_radius=25, axis_length=200)
scene = trimesh.Scene([self.mesh.apply_scale(1), ax])
return scene.show()
def separar_satelite(self):
"""
separar_satelite
Separa el satelite en mallas
Returns:
[type]: [description]
"""
y = np.array(
np.where(np.isin(self.sombra, self.Caras_Despegables) == False)).flatten()
despiece = []
despiece.append(self.mesh.split()[0])
for i in self.sombra[y]:
normal = self.mesh.facets_normal[i]
despiece.append(trimesh.intersections.slice_mesh_plane(self.mesh,
self.mesh.facets_normal[i],
self.mesh.facets_origin[i]+0.0001*self.mesh.facets_normal[i]))
return despiece
if __name__ == '__main__':
    # Smoke test: load a 12U CubeSat model, set a spin attitude about +Y,
    # rotate the model 90 degrees about Y, and evaluate the power once with
    # unit irradiance. Requires '12Unuv.stl' in the working directory.
    filename = '12Unuv.stl'
    actitud = SatelitteActitud(eje_de_spin=[0, 1, 0], control=True)
    d = SatelitteSolarPowerSystem(
        filename, actitud, Despegables_orientables=True)
    d.apply_transform(trimesh.transformations.rotation_matrix(
        np.pi/2, [0, 1, 0], [0, 0, 0]))
    Sun_vector = np.array([-0.10486044, 0.91244007, 0.39554696])
    print(d.mesh.facets_normal)
    W, area_potencia, ang, angulo_giro = d.power_panel_con_actitud(
        Sun_vector, 1)
| 18,815 | 6,272 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class ResPartner(models.Model):
    """Indian localization: enforce the GSTIN length on partner VAT numbers."""
    _inherit = 'res.partner'

    # Used in view attrs: state_id must be required when the country is India.
    l10n_in_country_code = fields.Char(related="country_id.code", string="Country code")

    @api.constrains('vat', 'country_id')
    def l10n_in_check_vat(self):
        """Raise ValidationError unless Indian partners' GSTIN (vat) is exactly 15 characters."""
        for partner in self.filtered(lambda p: p.commercial_partner_id.country_id.code == 'IN' and p.vat and len(p.vat) != 15):
            raise ValidationError(_('The GSTIN [%s] for partner [%s] should be 15 characters only.') % (partner.vat, partner.name))
| 744 | 263 |
import numpy as np
from scipy.interpolate import BSpline
from colossus.cosmology import cosmology
"""
Helper routines for basis functions for the continuous-function estimator.
"""
################
# Spline basis #
################
def spline_bases(rmin, rmax, projfn, ncomponents, ncont=2000, order=3):
    '''
    Build a set of B-spline basis functions of the given order and write
    them to a projection file.

    Parameters
    ----------
    rmin : double
        Minimum r-value for basis functions
    rmax : double
        Maximum r-value for basis functions
    projfn : string
        Path to the projection file to write
    ncomponents : int
        Number of components (basis functions); must be >= 2*order
    ncont : int, default=2000
        Number of continuous r-values at which to evaluate the bases
    order : int, default=3
        Spline order; default is cubic

    Returns
    -------
    bases: array-like, double
        2-d array of basis function values; first column is r-values
    '''
    if ncomponents < 2 * order:
        raise ValueError("ncomponents must be at least twice the order")
    knot_vectors = _get_knot_vectors(rmin, rmax, ncomponents, order)
    rcont = np.linspace(rmin, rmax, ncont)
    bases = np.empty((ncont, ncomponents + 1))
    bases[:, 0] = rcont
    for idx, kv in enumerate(knot_vectors):
        spline = BSpline.basis_element(kv)
        lo, hi = kv[0], kv[-1]
        # Zero outside the knot span: basis_element extrapolates otherwise.
        bases[:, idx + 1] = [spline(r) if lo <= r <= hi else 0 for r in rcont]
    np.savetxt(projfn, bases)
    return bases
def _get_knot_vectors(rmin, rmax, ncomponents, order):
nknots = order+2
kvs = np.empty((ncomponents, nknots))
width = (rmax-rmin)/(ncomponents-order)
for i in range(order):
val = i+1
kvs[i,:] = np.concatenate((np.full(nknots-val, rmin), np.linspace(rmin+width, rmin+width*val, val)))
kvs[ncomponents-i-1] = np.concatenate((np.linspace(rmax-width*val, rmax-width, val), np.full(nknots-val, rmax)))
for j in range(ncomponents-2*order):
idx = j+order
kvs[idx] = rmin+width*j + np.arange(0,nknots)*width
return kvs
#############
# BAO basis #
#############
def bao_bases(rmin, rmax, projfn, cosmo_base=None, ncont=2000,
              redshift=0.0, alpha_guess=1.0, dalpha=0.001, bias=1.0,
              k0=0.1, k1=10.0, k2=0.1, k3=0.001):
    '''
    Compute the 5-component BAO basis functions based on a cosmological model
    and linearized around the scale dilation parameter alpha.

    Parameters
    ----------
    rmin : double
        Minimum r-value for basis functions
    rmax : double
        Maximum r-value for basis functions
    projfn : string
        Path to the projection file to write
    cosmo_base : colossus cosmology object, default=colossus 'planck15'
        Cosmology object providing correlationFunction(r, z).
        (The docstring previously named nbodykit, but the code uses colossus.)
    ncont : int, default=2000
        Number of continuous r-values at which to write the basis function file
    redshift : double, default=0.0
        Redshift at which to compute the correlation function
    alpha_guess : double, default=1.0
        The alpha (scale dilation parameter) at which to compute the model
        (alpha=1.0 is no scale shift)
    dalpha : double, default=0.001
        The change in alpha used for the numerical partial derivative
    bias : double, default=1.0
        The bias parameter by which to scale the model amplitude
    k0 : double, default=0.1
        The initial magnitude of the derivative term
    k1 : double, default=10.0
        The initial magnitude of the s^2 nuisance parameter term
    k2 : double, default=0.1
        The initial magnitude of the s nuisance parameter term
    k3 : double, default=0.001
        The initial magnitude of the constant nuisance parameter term

    Returns
    -------
    bases: array-like, double
        2-d array of basis function values; first column is r-values
    '''
    if cosmo_base is None:
        print("cosmo_base not provided, defaulting to Planck 2015 cosmology ('planck15')")
        cosmo_base = cosmology.setCosmology('planck15')
    cf = cosmo_base.correlationFunction

    def cf_model(r):
        # Biased model correlation function at the requested redshift.
        return bias * cf(r, z=redshift)

    rcont = np.linspace(rmin, rmax, ncont)
    bs = _get_bao_components(rcont, cf_model, dalpha, alpha_guess,
                             k0=k0, k1=k1, k2=k2, k3=k3)
    nbases = len(bs)
    bases = np.empty((ncont, nbases + 1))
    bases[:, 0] = rcont
    bases[:, 1:nbases + 1] = np.array(bs).T
    np.savetxt(projfn, bases)
    # (Removed an unused local that recomputed the component count.)
    return bases
def _get_bao_components(r, cf_func, dalpha, alpha, k0=0.1, k1=10.0, k2=0.1, k3=0.001):
b1 = k1/r**2
b2 = k2/r
b3 = k3*np.ones(len(r))
cf = cf_func(alpha*r)
b4 = cf
cf_dalpha = cf_func((alpha+dalpha)*r)
dcf_dalpha = _partial_derivative(cf, cf_dalpha, dalpha)
b5 = k0*dcf_dalpha
return b1,b2,b3,b4,b5
def _partial_derivative(f1, f2, dv):
df = f2-f1
deriv = df/dv
return deriv
| 5,037 | 1,796 |
import glob
import os
import pandas as pd
import json
import ast
from tqdm import tqdm
import click
import pickle
from multiprocessing import Pool, cpu_count, Queue
from functools import partial
import itertools
import sys
sys.setrecursionlimit(15000)  # parse_string recurses over deep ASTs from large notebooks
import logging
# All progress messages go to a file so they don't interleave with tqdm output.
logpath = "./tree_matches.log"
logger = logging.getLogger('log')
logger.setLevel(logging.INFO)
ch = logging.FileHandler(logpath)
# ch.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(ch)
def replace_function_subtrees(coral_repr):
    """Drop the argument subtrees of Call nodes from a flattened AST.

    Nodes arrive in preorder as dicts with a "type" and an optional
    "children" list of indices into the same list. For every Call node the
    first child (the callee) is kept while the remaining children (the
    arguments) and their descendants are skipped.

    Args:
        coral_repr: list of node dicts produced by parse_string.

    Returns:
        list: the surviving nodes, in their original order.
    """
    skipped = set()
    pruned = []
    for idx, node in enumerate(coral_repr):
        if idx in skipped:
            # A skipped node drags its whole subtree along with it.
            skipped.update(node.get("children", []))
            continue
        if node["type"] == "Call":
            # Keep the callee (first child) but drop the argument subtrees.
            skipped.update(node.get("children", [])[1:])
        pruned.append(node)
    return pruned
class Snippet(object):
    """One notebook code cell together with its parsed tree representations."""

    def __init__(self, slug, version_id, source, competition=None,
                 max_size=512):
        """Parse `source` and store its flattened AST representations.

        Args:
            slug: kernel identifier.
            version_id: kernel version identifier.
            source: raw Python source of the cell.
            competition: unused; kept for interface compatibility.
            max_size: maximum number of flattened-AST nodes to keep.
        """
        self.slug = slug
        self.max_size = max_size
        self.version_id = version_id
        self.source = source
        self.coral_repr = parse_string(source)[:self.max_size]
        self.function_args_removed_repr = replace_function_subtrees(self.coral_repr)
        self.python_ast = ast.parse(source)

    def coral_diff(self, other, key=None, attr="coral_repr"):
        """Node-wise difference count between this snippet's tree and another's."""
        a_attr = getattr(self, attr)
        b_attr = getattr(other, attr)
        return self.tree_diff(a_attr, b_attr, key=key)

    # The two helpers below were originally defined inside the class WITHOUT
    # `self`, so calling them on an instance passed the instance as the first
    # argument and broke. As staticmethods they work both on the class and on
    # instances, keeping Snippet.rear_pad_list(a, n) backward compatible.
    @staticmethod
    def rear_pad_list(a, n):
        """Return `a` extended with None entries up to length n."""
        return a + [None] * (n - len(a))

    @staticmethod
    def make_same_length(a, b):
        """Right-pad the shorter of a and b with None so lengths match."""
        n = max(len(a), len(b))
        return (Snippet.rear_pad_list(a, n), Snippet.rear_pad_list(b, n))

    def tree_diff(self, a, b, key=None):
        """Count positions where the padded sequences a and b differ."""
        a, b = Snippet.make_same_length(a, b)
        if not key:
            key = lambda aa, bb: not aa == bb
        return sum(key(aa, bb) for (aa, bb) in zip(a, b))

    def to_dict(self):
        """Serializable summary of this snippet."""
        return {"slug": self.slug, "version_id": self.version_id,
                "source": self.source}
def rear_pad_list(a, n):
    """Return `a` extended with None entries up to length n."""
    return a + [None] * (n - len(a))


def make_same_length(a, b):
    """Right-pad the shorter of a and b with None so lengths match."""
    target = max(len(a), len(b))
    return rear_pad_list(a, target), rear_pad_list(b, target)


def tree_diff(a, b, key=None):
    """Count positions at which the padded sequences a and b differ.

    Args:
        a, b: sequences of comparable items (flattened AST nodes).
        key: optional binary predicate returning truthy when two items
            differ; defaults to plain inequality.

    Returns:
        int: number of differing positions.
    """
    left, right = make_same_length(a, b)
    difference = key if key else (lambda aa, bb: not aa == bb)
    return sum(difference(aa, bb) for aa, bb in zip(left, right))
def looks_like_string(node):
    """Heuristic: does this flattened-AST node hold a non-numeric constant?

    A Constant node whose value cannot be parsed as a float is treated as a
    string; numeric constants and every other node type are not.
    """
    if node.get("type") != "Constant":
        return False
    try:
        float(node.get("value"))
    except (ValueError, TypeError):
        return True
    return False


def dont_count_strings(a, b):
    """Difference predicate for tree_diff that ignores string-vs-string edits.

    None (padding from length mismatch) always counts as a difference; two
    string-like constants are treated as equal; otherwise plain inequality.
    """
    if a is None or b is None:
        return True
    if looks_like_string(a) and looks_like_string(b):
        return False
    return not a == b
def remove_duplicate_matches(matches):
    """Drop matches whose (source, source) pair was seen before, keeping order."""
    seen = set()
    unique = []
    for pair in matches:
        signature = (pair[0].source, pair[1].source)
        if signature not in seen:
            seen.add(signature)
            unique.append(pair)
    return unique
# def get_matching_cells(kernel_trees,diff_versions = False, key = None):
# matches = []
# all_cells = []
# for slug,versions in kernel_trees.items():
# all_version_cells = []
# for version_id, cells in versions.items():
# if cells:
# for cell in cells:
# all_version_cells.append(cell)
# n = len(all_version_cells)
# if n == 1:
# continue
# for i in range(n):
# for j in range(i+1,n):
# cell_i = all_version_cells[i]
# cell_j = all_version_cells[j]
# if diff_versions:
# if cell_i.version_id == cell_j.version_id:
# continue
# diff = cell_i.coral_diff(cell_j,key=key)
# if diff == 1:
# matches.append((cell_i,cell_j))
# all_cells = all_cells + all_version_cells
# return matches
def sort_versions_by_version_id(dictionary):
    """Return the dict's (version_id, cells) pairs sorted numerically by id."""
    return sorted(dictionary.items(), key=lambda item: int(item[0]))


def get_sequential_matching(kernel_trees, key=None, attr="coral_repr"):
    """Collect cell pairs from consecutive kernel versions whose diff is 1.

    Args:
        kernel_trees: mapping {slug: {version_id: [Snippet, ...]}}.
        key: optional difference predicate forwarded to coral_diff.
        attr: snippet attribute holding the tree representation to compare.

    Returns:
        list of (older_cell, newer_cell) pairs differing in exactly one node.
    """
    matches = []
    for versions in kernel_trees.values():
        ordered = sort_versions_by_version_id(versions)
        for (_, prev_cells), (_, next_cells) in zip(ordered, ordered[1:]):
            for prev_cell in prev_cells:
                for next_cell in next_cells:
                    if prev_cell.coral_diff(next_cell, key=key, attr=attr) == 1:
                        matches.append((prev_cell, next_cell))
    return matches
def get_matching_cells(kernel_trees, diff_versions=False, key=None, attr="coral_repr"):
    """Collect all cell pairs within each slug whose tree diff is exactly 1.

    Args:
        kernel_trees: mapping {slug: {version_id: [Snippet, ...]}}.
        diff_versions: when True, skip pairs from the same version.
        key: optional difference predicate forwarded to coral_diff.
        attr: snippet attribute holding the tree representation to compare.

    Returns:
        list of (cell_i, cell_j) pairs differing in exactly one node.
    """
    matches = []
    for versions in kernel_trees.values():
        cells = [cell
                 for version_cells in versions.values() if version_cells
                 for cell in version_cells]
        if len(cells) == 1:
            # A single cell has nothing to pair with.
            continue
        for i, cell_i in enumerate(cells):
            for cell_j in cells[i + 1:]:
                if diff_versions and cell_i.version_id == cell_j.version_id:
                    continue
                if cell_i.coral_diff(cell_j, key=key, attr=attr) == 1:
                    matches.append((cell_i, cell_j))
    return matches
def parse_string(string):
    """Flatten Python source into a preorder list of JSON-style AST nodes.

    Each node is a dict with a "type", an optional "value", and an optional
    "children" list of indices into the returned list.

    Args:
        string: Python source code to parse.

    Returns:
        list[dict]: the flattened AST, root (Module) first.

    Raises:
        SyntaxError: if the source does not parse.
    """
    global c, d  # NOTE(review): c and d are never defined or used anywhere visible — dead leftover.
    tree = ast.parse(string)
    json_tree = []

    def gen_identifier(identifier, node_type = 'identifier'):
        # Append a leaf node for a bare identifier; return its index.
        pos = len(json_tree)
        json_node = {}
        json_tree.append(json_node)
        json_node['type'] = node_type
        json_node['value'] = identifier
        return pos

    def traverse_list(l, node_type = 'list'):
        # Append a container node whose children are the traversed items.
        pos = len(json_tree)
        json_node = {}
        json_tree.append(json_node)
        json_node['type'] = node_type
        children = []
        for item in l:
            children.append(traverse(item))
        if (len(children) != 0):
            json_node['children'] = children
        return pos

    def traverse(node):
        # Append a node for `node`, recurse into its children, return its index.
        pos = len(json_tree)
        json_node = {}
        json_tree.append(json_node)
        json_node['type'] = type(node).__name__
        children = []
        # Node kinds that carry a value of their own.
        # NOTE(review): ast.Num/ast.Str are deprecated aliases of ast.Constant
        # and are slated for removal from the ast module in a future Python.
        if isinstance(node, ast.Name):
            json_node['value'] = node.id
        elif isinstance(node, ast.Num):
            json_node['value'] = str(node.n)
        elif isinstance(node, ast.Str):
            json_node['value'] = node.s
        elif isinstance(node, ast.alias):
            json_node['value'] = str(node.name)
            if node.asname:
                children.append(gen_identifier(node.asname))
        elif isinstance(node, ast.FunctionDef):
            json_node['value'] = str(node.name)
        elif isinstance(node, ast.ClassDef):
            json_node['value'] = str(node.name)
        elif isinstance(node, ast.ImportFrom):
            if node.module:
                json_node['value'] = str(node.module)
        elif isinstance(node, ast.Global):
            for n in node.names:
                children.append(gen_identifier(n))
        elif isinstance(node, ast.keyword):
            json_node['value'] = str(node.arg)
        # Process children, with custom grouping for compound statements.
        if isinstance(node, ast.For):
            children.append(traverse(node.target))
            children.append(traverse(node.iter))
            children.append(traverse_list(node.body, 'body'))
            if node.orelse:
                children.append(traverse_list(node.orelse, 'orelse'))
        elif isinstance(node, ast.If) or isinstance(node, ast.While):
            children.append(traverse(node.test))
            children.append(traverse_list(node.body, 'body'))
            if node.orelse:
                children.append(traverse_list(node.orelse, 'orelse'))
        elif isinstance(node, ast.With):
            # NOTE(review): Python 3 ast.With has `items`, not `context_expr`;
            # this branch raises AttributeError on any `with` statement, which
            # the caller (load_cell_as_snippets) silently swallows — confirm.
            children.append(traverse(node.context_expr))
            if node.optional_vars:
                children.append(traverse(node.optional_vars))
            children.append(traverse_list(node.body, 'body'))
        elif isinstance(node, ast.Try):
            children.append(traverse_list(node.body, 'body'))
            children.append(traverse_list(node.handlers, 'handlers'))
            if node.orelse:
                children.append(traverse_list(node.orelse, 'orelse'))
        elif isinstance(node, ast.arguments):
            children.append(traverse_list(node.args, 'args'))
            children.append(traverse_list(node.defaults, 'defaults'))
            if node.vararg:
                children.append(gen_identifier(node.vararg, 'vararg'))
            if node.kwarg:
                children.append(gen_identifier(node.kwarg, 'kwarg'))
        elif isinstance(node, ast.ExceptHandler):
            if node.type:
                children.append(traverse_list([node.type], 'type'))
            if node.name:
                children.append(traverse_list([node.name], 'name'))
            children.append(traverse_list(node.body, 'body'))
        elif isinstance(node, ast.ClassDef):
            children.append(traverse_list(node.bases, 'bases'))
            children.append(traverse_list(node.body, 'body'))
            children.append(traverse_list(node.decorator_list, 'decorator_list'))
        elif isinstance(node, ast.FunctionDef):
            children.append(traverse(node.args))
            children.append(traverse_list(node.body, 'body'))
            children.append(traverse_list(node.decorator_list, 'decorator_list'))
        else:
            # Default handling: iterate over children.
            for child in ast.iter_child_nodes(node):
                if isinstance(child, ast.expr_context) or isinstance(child, ast.operator) or isinstance(child, ast.boolop) or isinstance(child, ast.unaryop) or isinstance(child, ast.cmpop):
                    # Fold expr_context and operators into the parent's type
                    # instead of creating a child node.
                    json_node['type'] = json_node['type'] + type(child).__name__
                else:
                    children.append(traverse(child))
        if isinstance(node, ast.Attribute):
            children.append(gen_identifier(node.attr, 'attr'))
        if (len(children) != 0):
            json_node['children'] = children
        return pos

    traverse(tree)
    return json_tree
def get_param_from_filename(param, filename):
    """Extract a query-style parameter value from a filename.

    Filenames look like "slug?param=value.ext"; returns the captured value,
    or None when the parameter is absent.
    """
    import re  # the module-level imports omit `re`; keep the fix self-contained
    # Raw string: "\?" in a plain string is an invalid escape sequence
    # (SyntaxWarning on modern Python, future SyntaxError).
    query_regex = re.compile(r"\?{}=(.*)\.|\?".format(param))
    try:
        return re.findall(query_regex, filename)[0]
    except IndexError:
        return None
def get_slug_from_file(filename):
    """Return the slug portion of a filename: the text before '?' or '.'."""
    import re  # the module-level imports omit `re`; keep the fix self-contained
    return re.split(r"\?|\.", filename)[0]
def load_cell_as_snippets(slug, version_id, path, max_size=512):
    """Load a notebook JSON file and wrap each parseable code cell as a Snippet.

    Args:
        slug: kernel identifier (forwarded to Snippet).
        version_id: kernel version identifier (forwarded to Snippet).
        path: path to the notebook .json file.
        max_size: maximum flattened-AST size per Snippet.

    Returns:
        list[Snippet]: one per cell whose source parses; empty for malformed files.
    """
    with open(path) as kernel_file:
        cells = []
        try:
            res = json.load(kernel_file)
        except ValueError:
            # Not valid JSON at all.
            return cells
        if not (type(res) is dict) or not "cells" in res:
            # Unexpected notebook structure.
            return cells
        for cell in res["cells"]:
            if not cell.get("source"):
                continue
            if type(cell["source"]) is list:
                # Notebook sources may be stored as a list of line strings.
                cell["source"] = "".join(cell["source"])
            try:
                cells.append(Snippet(slug, version_id, cell["source"], max_size=max_size))
            except (SyntaxError, AttributeError):
                # Skip cells that are not parseable Python (AttributeError can
                # also come from parse_string's ast.With handling — see there).
                continue
        return cells
def get_slug_matches(competition_path, slug, ignore_function_args=False,
                     remove_exact_duplicates=False,
                     length_threshold=None, ignore_strings=False, max_size=512,
                     sequential_matches=False):
    """Find one-node-diff cell matches among all versions of one kernel slug.

    Args:
        competition_path: directory holding one sub-directory per slug.
        slug: kernel identifier; its directory contains one JSON per version.
        ignore_function_args: compare trees with Call arguments stripped.
        remove_exact_duplicates: drop matches with identical source pairs.
        length_threshold: keep only matches whose first cell has more than
            this many lines (click passes it through as a string).
        ignore_strings: treat string-vs-string constant edits as equal.
        max_size: maximum flattened-AST size per Snippet.
        sequential_matches: only pair cells from consecutive versions.

    Returns:
        list of matched (Snippet, Snippet) pairs.
    """
    kernel_version_snippets = {slug: {}}
    for version_path in glob.glob(os.path.join(competition_path, slug, "*.json")):
        filename = os.path.basename(version_path)
        version_id = os.path.splitext(filename)[0]
        if not version_id:
            continue
        kernel_version_snippets[slug][version_id] = load_cell_as_snippets(
            slug, version_id, version_path, max_size=max_size)
    match_attr = "function_args_removed_repr" if ignore_function_args else "coral_repr"
    key = dont_count_strings if ignore_strings else None
    if sequential_matches:
        matches = get_sequential_matching(kernel_version_snippets,
                                          key=key, attr=match_attr)
    else:
        matches = get_matching_cells(kernel_version_snippets, diff_versions=True,
                                     key=key, attr=match_attr)
    if length_threshold:
        # Bug fix: the threshold was hard-coded to 5 and the parameter was
        # silently ignored. int() because click delivers the option as a string.
        min_lines = int(length_threshold)
        matches = [m for m in matches if len(m[0].source.split("\n")) > min_lines]
    if remove_exact_duplicates:
        matches = remove_duplicate_matches(matches)
    return matches
# def get_competition_matches(competition_path):
# slugs = [os.path.basename(x) for x in glob.glob(os.path.join(competition_path,"*"))]
# matches = []
# for slug in slugs:
# matches = matches + get_slug_matches(competition_path,slug)
# logger.info("Done with {}".format(competition_path))
# return matches
def get_competition_matches(ignore_function_args, length_threshold, remove_exact_duplicates,
                            ignore_strings, max_size, sequential_matches, competition_path):
    """Run get_slug_matches over every slug directory inside a competition.

    competition_path deliberately comes last so functools.partial can bind
    all of the option flags up front (see main()).
    """
    slug_names = [os.path.basename(p)
                  for p in glob.glob(os.path.join(competition_path, "*"))]
    matches = []
    for slug in tqdm(slug_names):
        matches += get_slug_matches(competition_path, slug, ignore_function_args,
                                    remove_exact_duplicates, length_threshold,
                                    ignore_strings, max_size, sequential_matches)
    logger.info("Done with {}".format(competition_path))
    return matches
# def get_competition_matcher(ignore_function_args,length_threshold,remove_exact_duplicates,
# ignore_strings):
# def get_competition_matches(ignore_function_args,length_threshold,remove_exact_duplicates,
# ignore_strings, competition_path):
# slugs = [os.path.basename(x) for x in glob.glob(os.path.join(competition_path,"*"))]
# matches = []
# for slug in slugs:
# matches = matches + get_slug_matches(competition_path,slug,ignore_function_args,
# remove_exact_duplicates, length_threshold, ignore_strings)
# logger.info("Done with {}".format(competition_path))
# return matches
# return get_competition_matches
def write_matches(out_path, matches):
    """Serialize match pairs to <out_path>/matches.jsonl, one JSON array per line."""
    target = os.path.join(out_path, "matches.jsonl")
    with open(target, 'w') as handle:
        for pair in matches:
            handle.write(json.dumps([pair[0].to_dict(), pair[1].to_dict()]))
            handle.write("\n")
@click.command()
@click.argument('in_path', type=click.Path())
@click.argument('out_path', type=click.Path())
@click.option('--ignore_function_args', is_flag=True, default=False, show_default=True)
@click.option('--length_threshold', default=None, show_default=True)
@click.option('--remove_exact_duplicates', is_flag=True, default=False, show_default=True)
@click.option('--ignore_strings', is_flag=True, default=False, show_default=True)
@click.option('--max_size', default=512, show_default=True)
@click.option('--sequential_matches', is_flag=True, default=False, show_default=True)
def main(in_path,
         out_path,
         ignore_function_args,
         length_threshold,
         remove_exact_duplicates,
         ignore_strings,
         max_size,
         sequential_matches):
    """Mine matching code cells for every competition under IN_PATH and write
    the pairs to OUT_PATH/matches.jsonl."""
    # BUG FIX: the previous version sliced the path list to [1:2] and then ran
    # the matcher only on element [0] of that slice — debugging leftovers that
    # silently skipped every other competition. Process all of them.
    all_comp_paths = glob.glob(os.path.join(in_path, "*"))
    comp_matcher = partial(get_competition_matches,
                           ignore_function_args,
                           length_threshold,
                           remove_exact_duplicates,
                           ignore_strings,
                           max_size,
                           sequential_matches)
    all_matches = [comp_matcher(comp_path) for comp_path in all_comp_paths]
    write_matches(out_path, itertools.chain.from_iterable(all_matches))
if __name__ == '__main__':
    main()
| 17,587 | 5,403 |
# =================================================================
#
# Authors: Stephen Lloyd
# Ian Edwards
#
# Copyright (c) 2020, OpenCDMS Project
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
def windrose(
    speed,
    direction,
    facet,
    n_directions=12,
    n_speeds=5,
    speed_cuts="NA",
    col_pal="GnBu",
    ggtheme="grey",
    legend_title="Wind Speed",
    calm_wind=0,
    variable_wind=990,
    n_col=1,
):
    """Plot a windrose of wind speed and direction, faceted, using ggplot2.

    Not yet implemented: currently a stub that always returns None.

    Args:
        speed: Numeric vector of wind speeds.
        direction: Numeric vector of wind directions.
        facet: Character/factor vector naming the facet each observation
            belongs to (one windrose is drawn per facet).

    Kwargs:
        n_directions: Number of direction bins (petals on the rose)
            (default 12).
        n_speeds: Number of equally spaced wind-speed bins; used only when
            speed_cuts is "NA" (default 5).
        speed_cuts: Numeric cut points for the wind-speed intervals
            (default "NA").
        col_pal: Name of the brewer.pal.info colour palette used for
            plotting (default "GnBu").
        ggtheme: ggtheme to plot with, (partially) matching one of "grey",
            "gray", "bw", "linedraw", "light", "minimal", "classic"
            (default "grey").
        legend_title: Legend title text (default "Wind Speed").
        calm_wind: Upper wind-speed limit considered calm (default 0).
        variable_wind: Numeric code for variable winds, if applicable
            (default 990).
        n_col: Number of columns of plots (default 1).
    """
    # R signature this stub mirrors:
    # clifro::windrose(
    #     speed, direction, facet, n_directions=12, n_speeds=5, speed_cuts=NA,
    #     col_pal="GnBu", ggtheme=c(
    #         "grey", "gray", "bw", "linedraw", "light", "minimal", "classic"),
    #     legend_title="Wind Speed", calm_wind=0, variable_wind=990,
    #     n_col=1, ...)
    return None
| 3,273 | 1,025 |
import os
from selenium import webdriver
from sa11y.analyze import Analyze
import urllib3
# Silence urllib3 warnings (e.g. InsecureRequestWarning) to keep test output clean.
urllib3.disable_warnings()
class TestAccessibilitySa11y(object):
    """Smoke test: run a Sa11y accessibility analysis against a remote
    Sauce Labs Chrome session on saucedemo.com."""

    def test_analysis(self):
        # Credentials come from the environment; a missing variable raises
        # KeyError before any session is started.
        capabilities = {
            'browserName': 'chrome',
            'sauce:options': {
                'username': os.environ["SAUCE_USERNAME"],
                'accesskey': os.environ["SAUCE_ACCESS_KEY"],
            }
        }
        sauce_url = 'https://ondemand.us-west-1.saucelabs.com/wd/hub'
        # NOTE(review): positional desired_capabilities was removed in
        # Selenium 4.10 — confirm the pinned selenium version accepts this
        # call signature.
        driver = webdriver.Remote(sauce_url, capabilities)
        driver.get('https://www.saucedemo.com/')
        # Runs the Sa11y scan; presumably raises/records on violations —
        # TODO confirm Analyze.results() failure behavior.
        Analyze(driver).results()
        # NOTE(review): driver.quit() is skipped if an assertion/scan fails
        # above; consider a try/finally so sessions are not leaked.
        driver.quit()
| 663 | 216 |
# 275. H-Index II
# Follow up for H-Index: What if the citations array is sorted in ascending order? Could you optimize your algorithm?
class Solution(object):
    # http://blog.csdn.net/titan0427/article/details/50650006
    def hIndex(self, citations):
        """Return the h-index of a citation list sorted in ascending order.

        Binary-searches the candidate h in [1, n]: h is valid when the n-h-th
        paper has >= h citations and the paper before it (if any) has <= h.
        Runs in O(log n).

        :type citations: List[int]
        :rtype: int
        """
        n = len(citations)
        start, end = 1, n
        while start <= end:
            # BUG FIX: use floor division; `/` yields a float on Python 3,
            # which breaks the list indexing below (TypeError).
            h = (start + end) // 2
            if citations[n - h] < h:
                # Fewer than h papers have >= h citations: lower the guess.
                end = h - 1
            elif n - h - 1 >= 0 and citations[n - h - 1] > h:
                # The paper just outside the window also exceeds h: raise the guess.
                start = h + 1
            else:
                return h
        return 0
| 653 | 215 |
from django.conf import settings
from django import template
from news.models import NewsItem, NewsAuthor, NewsCategory
register = template.Library()
@register.tag
def get_news(parser, token):
    """Place up to `limit` published NewsItems into the context.

    {% get_news as news_items %}
    {% get_news 5 as news_items %}
    """
    bits = token.split_contents()
    if len(bits) == 3:
        # {% get_news as varname %} — no limit requested.
        limit = None
    elif len(bits) == 4:
        try:
            limit = abs(int(bits[1]))
        except ValueError:
            raise template.TemplateSyntaxError("If provided, second argument to `get_news` must be a positive whole number.")
    else:
        # BUG FIX: any other arity previously left `limit` unbound, producing a
        # NameError below instead of a helpful syntax error.
        raise template.TemplateSyntaxError("Wrong number of arguments: format is {% get_news [<limit>] as <varname> %}.")
    if bits[-2].lower() != 'as':
        raise template.TemplateSyntaxError("Missing 'as' from 'get_news' template tag. Format is {% get_news 5 as news_items %}.")
    return NewsItemNode(bits[-1], limit)
class NewsItemNode(template.Node):
    """
    Returns a QuerySet of published NewsItems based on the lookup parameters.
    """
    def __init__(self, varname, limit=None, author=None, category_slug=None, filters=None):
        # varname: context variable to assign the queryset to.
        # limit: optional max number of items (applied last, as a slice).
        # filters: optional extra **kwargs for QuerySet.filter().
        self.varname = varname
        self.limit = limit
        self.filters = filters
        # author is either a literal NewsAuthor slug,
        # or a template variable containing a NewsAuthor slug.
        self.author = author
        self.category = category_slug
    def render(self, context):
        """Resolve lookups against the context, store the queryset, render nothing."""
        # Base QuerySet, which will be filtered further if necessary.
        news = NewsItem.on_site.published()
        # Do we filter by author? If so, first attempt to resolve `author` as
        # a template.Variable. If that doesn't work, use `author` as a literal
        # NewsAuthor.slug lookup.
        if self.author is not None:
            try:
                author_slug = template.Variable(self.author).resolve(context)
            except template.VariableDoesNotExist:
                author_slug = self.author
            news = news.filter(author__slug=author_slug)
        # Same literal-or-variable resolution for the category slug.
        if self.category is not None:
            try:
                category_slug = template.Variable(self.category).resolve(context)
            except template.VariableDoesNotExist:
                category_slug = self.category
            news = news.filter(category__slug=category_slug)
        # Apply any additional lookup filters
        if self.filters:
            news = news.filter(**self.filters)
        # Apply a limit. NOTE: slicing must stay last — a sliced queryset
        # cannot be filtered further.
        if self.limit:
            news = news[:self.limit]
        context[self.varname] = news
        return u''
def parse_token(token):
    """
    Parses a token into 'slug', 'limit', and 'varname' values.
    Token must follow format {% tag_name <slug> [<limit>] as <varname> %}

    Returns (slug, limit_or_None, varname). A non-numeric limit falls back
    to None (no limit) rather than raising, matching historical behavior.
    """
    bits = token.split_contents()
    if len(bits) == 5:
        # A limit was passed in -- try to parse / validate it.
        try:
            limit = abs(int(bits[2]))
        except ValueError:
            # BUG FIX: narrowed from a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit; the fall-back-to-no-limit
            # behavior for malformed numbers is preserved.
            limit = None
    elif len(bits) == 4:
        # No limit was specified.
        limit = None
    else:
        # Syntax is wrong.
        raise template.TemplateSyntaxError("Wrong number of arguments: format is {%% %s <slug> [<limit>] as <varname> %%}" % bits[0])
    if bits[-2].lower() != 'as':
        raise template.TemplateSyntaxError("Missing 'as': format is {%% %s <slug> [<limit>] as <varname> %%}" % bits[0])
    return (bits[1], limit, bits[-1])
@register.tag
def get_posts_by_author(parser,token):
    """
    Place published NewsItems by one author into the context.

    {% get_posts_by_author <slug> [<limit>] as <varname> %}
    {% get_posts_by_author foo 5 as news_items %} # 5 articles
    {% get_posts_by_author foo as news_items %} # all articles
    """
    # parse_token handles arity/'as' validation and limit parsing.
    author_slug, limit, varname = parse_token(token)
    return NewsItemNode(varname, limit, author=author_slug)
@register.tag
def get_posts_by_category(parser,token):
    """
    Place published NewsItems in one category into the context.

    {% get_posts_by_category <slug> [<limit>] as <varname> %}
    {% get_posts_by_category foo 5 as news_items %} # 5 articles
    {% get_posts_by_category foo as news_items %} # all articles
    """
    category_slug, limit, varname = parse_token(token)
    return NewsItemNode(varname, limit, category_slug=category_slug)
@register.tag
def get_news_by_category(parser,token):
    """
    Alias for get_posts_by_category, registered under both names.

    This is because I got sick of having to debug issues due to the fact that I typed one or the other.
    """
    return get_posts_by_category(parser,token)
@register.tag
def get_posts_by_tag(parser,token):
    """
    Place published NewsItems whose tags contain <tag> into the context.

    {% get_posts_by_tag <tag> [<limit>] as <varname> %}
    """
    tag, limit, varname = parse_token(token)
    # NOTE(review): `tags__contains` is a substring match — "art" also matches
    # "cartoon"; confirm this is the intended tag semantics.
    return NewsItemNode(varname, limit, filters={'tags__contains':tag})
@register.tag
def months_with_news(parser, token):
    """Place the distinct months that have published news into the context.

    {% months_with_news as months %}
    {% months_with_news 4 as months %}
    """
    bits = token.split_contents()
    if len(bits) == 3:
        limit = None
    elif len(bits) == 4:
        try:
            limit = abs(int(bits[1]))
        except ValueError:
            raise template.TemplateSyntaxError("If provided, second argument to `months_with_news` must be a positive whole number.")
    else:
        # BUG FIX: any other arity previously left `limit` unbound, producing a
        # NameError below instead of a helpful syntax error.
        raise template.TemplateSyntaxError("Wrong number of arguments: format is {% months_with_news [<limit>] as <varname> %}.")
    if bits[-2].lower() != 'as':
        raise template.TemplateSyntaxError("Missing 'as' from 'months_with_news' template tag. Format is {% months_with_news 5 as months %}.")
    return MonthNode(bits[-1], limit=limit)
class MonthNode(template.Node):
    """Resolves the distinct months (descending) that have published news
    into the context variable `varname`."""

    def __init__(self, varname, limit=None):
        self.varname = varname
        self.limit = limit  # for MonthNode inheritance

    def render(self, context):
        try:
            months = NewsItem.on_site.published().dates('date', 'month', order="DESC")
        except Exception:
            # Narrowed from a bare `except:` (which also caught
            # KeyboardInterrupt/SystemExit); any query failure still yields None.
            months = None
        # BUG FIX: the slice previously ran even when the query failed, so
        # list(None) raised TypeError; only slice a real result.
        if self.limit is not None and months is not None:
            months = list(months)[:self.limit]
        context[self.varname] = months
        return ''
@register.tag
def get_categories(parser,token):
    """Place the site's NewsCategory objects into the context.

    {% get_categories as <varname> %}
    {% get_categories 5 as <varname> %}
    """
    bits = token.split_contents()
    if len(bits) == 3:
        limit = None
    elif len(bits) == 4:
        try:
            limit = abs(int(bits[1]))
        except ValueError:
            raise template.TemplateSyntaxError("If provided, second argument to `get_categories` must be a positive whole number.")
    else:
        # BUG FIX: any other arity previously left `limit` unbound, producing a
        # NameError below instead of a helpful syntax error.
        raise template.TemplateSyntaxError("Wrong number of arguments: format is {% get_categories [<limit>] as <varname> %}.")
    if bits[-2].lower() != 'as':
        raise template.TemplateSyntaxError("Missing 'as' from 'get_categories' template tag. Format is {% get_categories 5 as categories %}.")
    return CategoryNode(bits[-1], limit=limit)
class CategoryNode(template.Node):
    """Places the site's NewsCategory queryset (optionally truncated) into
    the context variable `varname`; renders nothing itself."""

    def __init__(self, varname, limit=None):
        self.limit = limit
        self.varname = varname

    def render(self, context):
        category_list = NewsCategory.on_site.all()
        if self.limit is not None:
            # Materialize then truncate, matching the list(...)[:limit] contract.
            category_list = list(category_list)[:self.limit]
        context[self.varname] = category_list
        return ''
@register.inclusion_tag('news/news_ul.html')
def news_ul(slug):
    """Render news/news_ul.html for the NewsCategory with the given slug.

    Missing categories yield an empty context (the template renders without
    `category`) rather than raising.
    """
    try:
        return {'category': NewsCategory.objects.get(slug=slug)}
    except NewsCategory.DoesNotExist:
        return {}
import math
import falcon
import jsonschema
class Model(object):
    """Minimal schema-validated model base class for falcon resources.

    Subclasses override the class-level attributes below; instances validate
    incoming payloads against the generated JSON schema via validate().
    """
    db_table = ""    # db table name
    properties = {}  # schema properties
    schema = {}      # extra schema fragments merged into the generated schema
    required = []    # list of required fields

    def __init__(self, db):
        self._db = db
        self.validated_data = {}
        self.read_only = []
        self.write_only = []

    def get_schema(self):
        """Return the full JSON schema for this model.

        BUG FIX: the original mutated the class-level `schema` dict in place,
        so the first call polluted every model class sharing that attribute.
        Build a fresh merged dict and store it on the instance instead.
        """
        merged = dict(self.schema)
        merged.update({
            "properties": self.properties,
            "required": self.required,
            "title": self.db_table,
            "type": "object",
        })
        self.schema = merged  # instance attribute shadows the class attribute
        return merged

    def create(self):
        pass

    def update(self, pk):
        pass

    def delete(self, pk):
        pass

    def all(self):
        return self._db.table(self.db_table).select()

    def validate(self, data):
        """Validate `data` against the schema; on success populate
        validated_data / read_only / write_only, otherwise raise
        falcon.HTTPBadRequest."""
        try:
            schema = self.get_schema()
            jsonschema.validate(data, schema, format_checker=jsonschema.FormatChecker())
            # if above passes
            properties = schema.get('properties')
            # BUG FIX: rebuild the lists instead of appending so repeated
            # validate() calls don't accumulate duplicate field names.
            self.read_only = [k for k, v in properties.items() if v.get('readOnly')]
            self.write_only = [k for k, v in properties.items() if v.get('writeOnly')]
            # remove items in data that are not in schema properties
            self.validated_data = {k: v for k, v in data.items() if properties.get(k)}
        except jsonschema.ValidationError as e:
            raise falcon.HTTPBadRequest('Data validation failed', description=e.message)

    def get_cleaned_data(self, remove_keys):
        """Return validated_data minus the keys listed in `remove_keys`."""
        return {k: v for k, v in self.validated_data.items() if k not in remove_keys}
| 1,731 | 524 |
import os
from goldminer import game
if __name__ == "__main__":
    # Report startup context before handing control to the game loop.
    print("Initializing")
    print(f"Working directory: {os.getcwd()}")
    game.start()
| 155 | 52 |
#!/usr/bin/env python
from math import ceil
import os
import sys
import argparse
import multiprocessing
import subprocess as sp
import re
#from pprint import pprint
from array import array
from yaml import load, dump
contexts = ('CG','CHG','CHH')
def main():
    """CLI entry point: align bisulfite reads with bsmap, compute per-cytosine
    methylation ratios with methratio.py, summarise into fixed-size tiles and
    bedgraphs/bigwigs, and record every parameter and statistic to
    <out>/<name>.yaml."""
    fCheck = fileCheck() #class for checking parameters
    parser = argparse.ArgumentParser(description="Wrapper for Bisulfite Methylation Alignment.")
    parser.add_argument('-R', metavar='FASTA', help='Reference for alignment', required=True, type=fCheck.fasta)
    parser.add_argument('-r1', metavar='FASTQ', help='Single or first fastq from pair', required=True, type=fCheck.fastq)
    parser.add_argument('-r2', metavar='FASTQ', help='Second read', type=fCheck.fastq)
    parser.add_argument('-O', metavar='STR', help='Output directory (Default: %(default)s)', default='.', type=str)
    parser.add_argument('-N', '--name', metavar='STR', help='Name for run')
    parser.add_argument('-U', '--uniq', action='store_true', help="Only use unique alignments")
    parser.add_argument('-q', help="Fastq Quality Encoding (Default: %(default)s)", default=33, type=int)
    parser.add_argument('-C', metavar='Chrom', help="Chromosome to use for checking bisulfite conversion rate")
    parser.add_argument('-S', dest='tileSize', metavar='N', type=int, help="Window size (Default: %(default)s)", default=100)
    parser.add_argument('-d', metavar='N', type=int, help="Minimum coverage in tile for methylation to be printed (Default: %(default)s - all)", default=1)
    parser.add_argument('--CG', metavar='N', type=int, help="Minimum sites per tile (Default: %(default)s)", default=3)
    parser.add_argument('--CHG', metavar='N', type=int, help="Minimum sites per tile (Default: %(default)s)", default=3)
    parser.add_argument('--CHH', metavar='N', type=int, help="Minimum sites per tile (Default: %(default)s)", default=6)
    args = parser.parse_args()
    ######################################################
    # Path Section
    ######################################################
    # Default run name: R1 filename without its extension.
    if not args.name:
        args.name = os.path.splitext(args.r1)[0]
    if not os.path.exists(args.O): os.makedirs(args.O)
    outPrefix = os.path.join(args.O, args.name)
    ######################################################
    # Arguments Section
    ######################################################
    # `config` doubles as the run manifest: each entry stores the value AND a
    # human-readable description, and the whole dict is dumped to YAML below.
    config = {'bsmap':{}, 'methratio':{}, 'tiles':{}}
    #-----------------------------------------------------
    # Arguments for running BSMAP
    #-----------------------------------------------------
    config['bsmap']['-a'] = {'value':args.r1, 'description':'R1 input'}
    config['bsmap']['-z'] = {'value':str(args.q), 'description':'Fastq quality encoding'}
    config['bsmap']['-p'] = {'value':str(multiprocessing.cpu_count()), 'description':'Number of threads'}
    config['bsmap']['-q'] = {'value':'20', 'description':"Quality threshold for trimming 3' ends of reads"}
    config['bsmap']['-d'] = {'value':args.R, 'description':'Reference'}
    config['bsmap']['-S'] = {'value':'77345', 'description':'Hardcoded random seed for mapping reproducibility'}
    config['bsmap']['-w'] = {'value':'10000', 'description':'Number of candidate seeds to align against'}
    #config['bsmap']['-V'] = {'value':'1', 'description':'Print major messages'}
    #config['bsmap']['-o'] = {'value':args.name+".sam", 'description':'Output BAM'} # default SAM stdout is piped to samtools
    #-----------------------------------------------------
    # Arguments for methratio.py
    #-----------------------------------------------------
    #config['methratio']['-q'] = {'value':'', 'description':'Quiet'}
    config['methratio']['-z'] = {'value':'', 'description':'Report locations with zero methylation'}
    config['methratio']['-r'] = {'value':'', 'description':'Remove duplicate reads'}
    config['methratio']['-d'] = {'value':args.R, 'description':'Reference'}
    config['methratio']['-o'] = {'value':outPrefix+"_methratio.txt", 'description':'Output methylation ratio file'}
    #-----------------------------------------------------
    # Paired specific arguments
    #-----------------------------------------------------
    if args.r2:
        config['bsmap']['-b'] = {'value':args.r2, 'description':'R2 input'}
        config['methratio']['-p'] = {'value':'', 'description':'Require propper pairings'}
    if args.uniq:
        config['bsmap']['-r'] = {'value':'0', 'description':'No non-unique hits reported'}
        config['methratio']['-u'] = {'value':'', 'description':'Only use unique alignments'}
    else:
        # NOTE(review): this overrides the '-w' value of 10000 set above for
        # the non-unique case — confirm that narrowing to 20 is intentional.
        config['bsmap']['-r'] = {'value':'2', 'description':'non-unique hits reported'}
        config['bsmap']['-w'] = {'value':'20', 'description':'Only 20 equal best hits reported'}
    #-----------------------------------------------------
    # Tile Section
    #-----------------------------------------------------
    config['tiles']['size'] = {'value':args.tileSize, 'description':'Size of tiles for summarizing methylation'}
    config['tiles']['minCoverage'] = {'value':args.d, 'description':'Minimum Coverage'}
    config['tiles']['CG'] = {'value':args.CG, 'description':'Minimum number of sites per tile'}
    config['tiles']['CHG'] = {'value':args.CHG, 'description':'Minimum number of sites per tile'}
    config['tiles']['CHH'] = {'value':args.CHH, 'description':'Minimum number of sites per tile'}
    ######################################################
    # Check for Dependencies
    ######################################################
    for d in ('bsmap','samtools','methratio.py','bedGraphToBigWig'):
        if not which(d):
            sys.exit("Please add %s to your path\n"%(d))
    # Parse FAI
    fai = args.R+'.fai'
    if not os.path.exists(fai):
        os.system("samtools faidx %s"%(args.R))
    ######################################################
    # Run workflow
    ######################################################
    faiDict = ParseFai(fai)
    #-----------------------------------------------------
    # run BSMAP
    #-----------------------------------------------------
    runBSMAP(config, outPrefix, args.r2)
    #-----------------------------------------------------
    # run methratio.py and calculate conversion rate
    #-----------------------------------------------------
    runRatio(config)
    if args.C:
        calcConversion(config, args.C, faiDict)
    #-----------------------------------------------------
    # Make Tiles and Bedgraphs
    #-----------------------------------------------------
    makeTile(config, outPrefix, faiDict)
    #-----------------------------------------------------
    # Make bigWig
    #-----------------------------------------------------
    makeBigWig(config,fai)
    #-----------------------------------------------------
    # Write YAML
    #-----------------------------------------------------
    dump(config, open(outPrefix+'.yaml','w'), default_flow_style=False, width=1000)
def calcConversion(config, chrom, faiDict):
    """Estimate bisulfite conversion efficiency from one chromosome that is
    expected to carry no methylation (e.g. chloroplast/lambda), and record the
    numbers under config['conversion']."""
    if not chrom in faiDict:
        chromStr = '\n - '.join(faiDict.keys())
        sys.exit("Chromosome: %s not in reference. Please choose a chromosome from:\n - %s"%(chrom, chromStr))
    ratioFile = config['methratio']['-o']['value']
    # grep the methratio table for rows of this chromosome only.
    # NOTE(review): "\s" in a non-raw string is a deprecated escape on
    # Python 3 (and this code reads the pipe as text) — written for Python 2.
    p = sp.Popen(["grep", "^%s\s"%chrom, ratioFile], stdout=sp.PIPE).stdout
    cSum = 0
    ctSum = 0
    for line in p:
        # methratio columns: [6] = methylated C count, [7] = total C+T count.
        tmp = line.split('\t')
        cSum += int(tmp[6])
        ctSum += int(tmp[7])
    # +1.0 in the denominator guards against division by zero when the
    # chromosome has no coverage at all.
    percent = round((1.0-float(cSum)/(float(ctSum)+1.0))*100.0, 2)
    config['conversion'] = {}
    config['conversion']['Chromosome'] = {'value':chrom, 'description':'Chromosome to calculate conversion efficiency from. No methylation should be expected on this chromosome.'}
    config['conversion']['C'] = {'value':cSum, 'description':'Number of methylated cytosines'}
    config['conversion']['CT'] = {'value':ctSum, 'description':'Number of un/methylated cytosines'}
    config['conversion']['percent'] = {'value':percent, 'description':'Conversion rate: (1-C/CT)*100'}
    p.close()
def runRatio(config):
    """Run methratio.py on the sorted BAM produced by runBSMAP and parse its
    final summary line into config['methratio_stats']."""
    ratioCMD = makeCMD('methratio.py', config, 'methratio')+[config['bsmap_stats']['output']['value']]
    ratioOUT = sp.check_output(ratioCMD, stderr=sp.STDOUT)
    # methratio prints its totals on the last non-empty output line.
    # NOTE(review): check_output returns bytes on Python 3; this split('\n')
    # and the regex below assume str — written for Python 2.
    statLine = ratioOUT.split('\n')[-2]
    m = re.match(r".+total\s([0-9]+)\s.+,\s([0-9]+)\s.+age:\s(\w+\.\w+) fold", statLine)
    # m is None if methratio's output format ever changes; this would raise
    # AttributeError on the next line.
    mappings, covered, coverage = m.groups()
    config['methratio_stats'] = {}
    config['methratio_stats']['mappings'] = {'value':mappings, 'description':'Number of valid mappings'}
    config['methratio_stats']['covered'] = {'value':covered, 'description':'Number of cytosines covered'}
    config['methratio_stats']['coverage'] = {'value':coverage, 'description':'Average coverage fold'}
def runBSMAP(config, outPrefix, r2):
    """Run bsmap, pipe its SAM stdout through `samtools view | samtools sort`
    into <outPrefix>.bam, and parse bsmap's stderr counters into
    config['bsmap_stats']. `r2` is truthy for paired-end runs (selects which
    stderr counters are parsed)."""
    bsmapCMD = makeCMD('bsmap', config, 'bsmap')
    bsP = sp.Popen(bsmapCMD, stderr=sp.PIPE, stdout=sp.PIPE)
    cpus = str(multiprocessing.cpu_count())
    # NOTE(review): the shell pipeline already writes %s.bam via `samtools
    # sort -o`, yet stdout= is ALSO redirected to a second handle on the same
    # file (never closed here) — confirm this double-write is intentional.
    samP = sp.Popen('samtools view -uS - | samtools sort -m 200M -@ %s -O bam -o %s.bam -T %s_tmp'%(cpus, outPrefix, outPrefix), shell=True, stdin=bsP.stdout, stdout=open(outPrefix+'.bam','wb'), stderr=sp.PIPE)
    # Close our copy of bsmap's stdout so bsmap gets SIGPIPE if samtools dies.
    bsP.stdout.close()
    # Drain stderr fully before waiting to avoid a pipe-buffer deadlock.
    bsOUT = bsP.stderr.read()
    samP.wait()
    if r2:
        # bsmap reports four "pairs:" counters: input, aligned, unique, multi.
        total, aligned, unique, mult = map(int, re.findall(r'pairs:\s+([0-9]+)', bsOUT))
        unit='pairs'
    else:
        total, aligned, unique, mult = map(int, re.findall(r'reads:\s+([0-9]+)', bsOUT))
        unit='reads'
    config['bsmap_stats'] = {}
    config['bsmap_stats']['output'] = {'value':outPrefix+".bam", 'description':'Output BAM'}
    config['bsmap_stats']['input'] = {'value':total, 'description':'Total number of %s in input'%(unit)}
    config['bsmap_stats']['aligned'] = {'value':aligned, 'description':'Total number of %s aligned'%(unit)}
    config['bsmap_stats']['unique'] = {'value':unique, 'description':'Total number of %s uniquely aligned'%(unit)}
    config['bsmap_stats']['mult'] = {'value':mult, 'description':'Total number of %s with multiple alignments'%(unit)}
def makeCMD(baseBin, config, section):
    """Build a subprocess argv list from one section of the config dict.

    Every flag in config[section] is appended; flags whose stored value is an
    empty string are emitted bare (flag with no argument).
    """
    argv = [baseBin]
    for flag, entry in config[section].items():
        argv.append(flag)
        if entry['value']:
            argv.append(entry['value'])
    return argv
def ParseFai(inFile):
    '''
    Parses a fa.fai into a python dictionary mapping chromosome name to
    sequence length (columns 1 and 2 of the index).

    Paramteters
    ================================
    inFile	FILE	fai file
    '''
    # BUG FIX: the original leaked the file handle
    # (open(...).readlines() with no close). Blank/short lines are skipped.
    with open(inFile, 'r') as handle:
        return {fields[0]: int(fields[1])
                for fields in (line.split('\t') for line in handle)
                if len(fields) >= 2}
class fileCheck:
    """Argparse `type=` callbacks that validate a path's extension and existence."""

    def check(self, file, exts):
        """Raise ArgumentTypeError unless `file` has an extension in `exts`
        (checked first) and exists on disk."""
        name = os.path.split(file)[1]
        extension = os.path.splitext(file)[1][1:]
        if extension not in exts:
            raise argparse.ArgumentTypeError("%s not a %s" % (name, exts[0]))
        if not os.path.exists(file):
            raise argparse.ArgumentTypeError("%s does not exist" % (file))

    def fastq(self, file):
        """Validate a .fastq/.fq path and return it unchanged."""
        self.check(file, ['fastq', 'fq'])
        return file

    def fasta(self, file):
        """Validate a .fasta/.fa path and return it unchanged."""
        self.check(file, ['fasta', 'fa'])
        return file
def makeBigWig(config,fai):
    """Convert every bedgraph produced by makeTile into a bigwig (in parallel,
    one bedGraphToBigWig process per file) and record the outputs in config."""
    bedgraphs = config['tiles']['output']['bedgraphs']['value']
    pool = []
    bws = []
    for bg in bedgraphs:
        # foo.bedgraph -> foo.bw next to it.
        bw = os.path.splitext(bg)[0]+'.bw'
        bws.append(bw)
        pool.append(sp.Popen(['bedGraphToBigWig',bg,fai,bw]))
    # Wait for all conversions; exit codes are not checked here.
    for p in pool:
        p.wait()
    config['bigwigs'] = {'value':bws,'description':'Bigwig versions of bedgraph files for jbrowse to load'}
def makeTile(config, outPrefix, faiDict):
    """Summarise per-cytosine methylation into fixed-size tiles.

    For each chromosome, per-context (CG/CHG/CHH) C and CT counts are binned
    into tiles of config['tiles']['size'] bp, then written as one .tab file
    (all contexts) and one bedgraph per context. Zero-methylation runs in the
    bedgraphs are merged into single intervals via the zCheck bookkeeping.

    NOTE(review): this function is Python 2 code — `xrange`, `pos/tileSize`
    integer division, and `map()` results being indexed/reused as lists all
    break on Python 3. The 'H' (unsigned short) arrays also wrap above 65535
    counts per tile — confirm expected coverage stays below that.
    """
    # Make sure to do something with the coverage variable
    bgNames = map(lambda x: outPrefix+'_'+x+'.bedgraph', contexts)
    config['tiles']['output'] = {\
        'bedgraphs':{'value':bgNames, 'description':'Mehtylation ratios for each methylation motif {CG, CHG, CHH} in bedgraph format.'},\
        'tab':{'value':outPrefix+'.tab', 'description':'Tab delimited file of methylation ratios and coverage for each tile.'}}
    buffer = 100000
    bGs = map(lambda x: open(x, 'w', buffer), bgNames)
    tab = open(outPrefix+'.tab', 'w', buffer)
    # Write header
    #headStr = '\t'.join(['Chr','Start','End']+[ c+'_'+t for c in contexts for t in ('ratio','C','CT')]) ## old out format
    headStr = '\t'.join(['Chr','Start','End']+[ c+'_'+t for c in contexts for t in ('ratio','C','CT','sites')]) ## new out format
    tab.write(headStr+'\n')
    #######################################
    # Get parameters
    #######################################
    tileSize = config['tiles']['size']['value']
    ratioFile = config['methratio']['-o']['value']
    # Per-context minimum-site thresholds, in `contexts` order.
    nSitesT = map(lambda y: config['tiles'][y]['value'], contexts)
    sortedChroms = sorted(faiDict.keys())
    #######################################
    # start writing by chromosome
    #######################################
    for chrom in sortedChroms:
        #----------------------------------
        # Create data arrays
        #----------------------------------
        offset = int(ceil(faiDict[chrom]/float(tileSize))) # number of tiles
        C, CT, nSites = makeDataArrays(offset)
        #----------------------------------
        # Read Chrom and populate arrays
        #----------------------------------
        # The three contexts are packed into one flat array: context block
        # `cIndex` occupies indices [offset*cIndex, offset*(cIndex+1)).
        p = sp.Popen(["grep", "^%s\s"%chrom, ratioFile], stdout=sp.PIPE).stdout
        for line in p:
            chr, pos, cIndex, c, ct = formatLine(line)
            index = offset*cIndex+pos/tileSize
            C[index] += c
            CT[index] += ct
            nSites[index] += 1
        p.close()
        # zCheck is true if loc-1 had zero methylation
        zCheck = [False, False, False]
        for posIndex in xrange(offset): # tile index
            start = posIndex*tileSize
            end = min(start+tileSize, faiDict[chrom])
            tabStr = '%s\t%i\t%i'%(chrom,start,end)
            for cIndex in range(3):
                loc = offset*cIndex+posIndex # data index
                tabStr += makeTabStr(C[loc], CT[loc], nSites[loc])
                #-------------------------
                # Generate BG
                #-------------------------
                if C[loc]: # if methylated
                    if nSites[loc] < nSitesT[cIndex]:
                        # Too few sites to report a ratio: treat as part of a
                        # zero run; open one if not already open.
                        if not zCheck[cIndex]:
                            bgStr = '%s\t%i\t'%(chrom,start)
                            zCheck[cIndex] = True
                            bGs[cIndex].write(bgStr)
                    else:
                        if zCheck[cIndex]: # if previous was 0
                            # Close the pending zero run before this ratio.
                            bgStr = '%i\t0\n'%(start,)
                            zCheck[cIndex] = False
                            bGs[cIndex].write(bgStr)
                        ratio = float(C[loc])/float(CT[loc])
                        bgStr = '%s\t%i\t%i\t%.2f\n'%(chrom,start,end,ratio)
                        bGs[cIndex].write(bgStr)
                else:
                    # Unmethylated tile: open a zero run if needed.
                    if not zCheck[cIndex]:
                        bgStr = '%s\t%i\t'%(chrom,start)
                        zCheck[cIndex] = True
                        bGs[cIndex].write(bgStr)
                #-------------------------
            tab.write(tabStr+'\n')
        #---------------------------------
        # Write out orphaned zeros
        #---------------------------------
        # Close any zero run still open at the chromosome end.
        for cIndex in range(3):
            if zCheck[cIndex]:
                bgStr = '%i\t0\n'%(end,)
                bGs[cIndex].write(bgStr)
    ######################################
    # Close files
    ######################################
    for bg in bGs:
        bg.close()
    tab.close()
def makeTabStr(C, CT, nSites):
    '''
    Generates a tab-separated string for the .tab file.

    Emits "\\t<ratio>\\t<C>\\t<CT>\\t<nSites>"; the ratio is printed as a
    literal 0 (not 0.00) when there are no methylated counts.
    '''
    if not C:
        return '\t0\t%i\t%i\t%i' % (C, CT, nSites)
    return '\t%.2f\t%i\t%i\t%i' % (float(C) / float(CT), C, CT, nSites)
def formatLine(line):
    """Split one methratio line into (chrom, 0-based pos, context index, C, CT)."""
    fields = line.split('\t')
    chrom = fields[0]
    position = int(fields[1]) - 1  # methratio positions are 1-based
    contextIndex = contexts.index(fields[3])
    return (chrom, position, contextIndex, int(fields[6]), int(fields[7]))
def which(program):
    """Locate `program` like the shell `which`: return its path, or None.

    A name containing a directory component is checked directly; a bare name
    is searched for along $PATH.
    """
    def runnable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        if runnable(program):
            return program
        return None  # explicit path given but not an executable file
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), program)
        if runnable(candidate):
            return candidate
    return None
def makeDataArrays(offset):
    '''
    Function for creating arrays that keep track of data from
    methratio.py output.

    Returns three independent unsigned-short arrays (C counts, CT counts,
    site counts), each offset*3 long — one `offset`-sized block per context.

    >>> makeDataArrays(1)
    (array('H', [0, 0, 0]), array('H', [0, 0, 0]), array('H', [0, 0, 0]))
    '''
    size = offset * 3
    def zeros():
        return array('H', [0]) * size
    # site counts max out at the tile size, which fits in 'H'
    return (zeros(), zeros(), zeros())
if __name__ == "__main__":
    main()  # run the full alignment + methylation summary pipeline
| 15,337 | 5,502 |
import json
import pytest
from microdata_validator import Metadata, PatchingError
RESOURCE_DIR = 'tests/resources/metadata_model'
# Fixture JSON documents, loaded once at import time. Tests treat these as
# read-only and build fresh Metadata objects from them per test.
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described.json') as f:
    TRANSFORMED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described_update.json') as f:
    UPDATED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_enumerated.json') as f:
    ENUMERATED_TRANSFORMED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_enumerated_update.json') as f:
    ENUMERATED_UPDATED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_enumerated_patched.json') as f:
    PATCHED_ENUMERATED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described_patched.json') as f:
    PATCHED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described_illegal_update.json') as f:
    # New variable name on line 18
    ILLEGALLY_UPDATED_METADATA = json.load(f)
with open(f'{RESOURCE_DIR}/KREFTREG_DS_described_deleted_object.json') as f:
    # Deleted keyType object line 34
    DELETED_OBJECT_METADATA = json.load(f)
def test_object():
    """A Metadata object exposes its key type and round-trips to_dict()."""
    metadata = Metadata(TRANSFORMED_METADATA)
    assert metadata.get_identifier_key_type_name() == 'SYKDOMSTILFELLE'
    assert metadata.to_dict() == TRANSFORMED_METADATA
def test_patch_described():
    """Patching a described dataset with its update yields the patched fixture."""
    original = Metadata(TRANSFORMED_METADATA)
    original.patch(Metadata(UPDATED_METADATA))
    assert original.to_dict() == PATCHED_METADATA
def test_patch_enumerated():
    """Patching an enumerated dataset with its update yields the patched fixture."""
    original = Metadata(ENUMERATED_TRANSFORMED_METADATA)
    original.patch(Metadata(ENUMERATED_UPDATED_METADATA))
    assert original.to_dict() == PATCHED_ENUMERATED_METADATA
def test_patch_with_deleted_object():
    """Deleting the keyType object in the update must be rejected."""
    with pytest.raises(PatchingError) as e:
        transformed_metadata = Metadata(TRANSFORMED_METADATA)
        updated_metadata = Metadata(DELETED_OBJECT_METADATA)
        transformed_metadata.patch(updated_metadata)
    # BUG FIX: assert against the raised exception (`e.value`), not the
    # ExceptionInfo wrapper — str(e) is the wrapper's repr, so the original
    # check could pass or fail for the wrong reason.
    assert 'Can not delete KeyType' in str(e.value)
def test_patch_with_None():
    """Patching with None must raise a descriptive PatchingError."""
    with pytest.raises(PatchingError) as e:
        transformed_metadata = Metadata(TRANSFORMED_METADATA)
        transformed_metadata.patch(None)
    # BUG FIX: check the exception's own message via e.value, not the
    # ExceptionInfo wrapper's repr.
    assert 'Can not patch with NoneType Metadata' in str(e.value)
def test_illegaly_patch():
    """Changing protected variable fields in the update must be rejected."""
    with pytest.raises(PatchingError) as e:
        transformed_metadata = Metadata(TRANSFORMED_METADATA)
        illegally_updated_metadata = Metadata(ILLEGALLY_UPDATED_METADATA)
        transformed_metadata.patch(illegally_updated_metadata)
    # BUG FIX: check the exception's own message via e.value, not the
    # ExceptionInfo wrapper's repr.
    assert (
        'Illegal change to one of these variable fields: '
        '[name, dataType, format, variableRole]'
    ) in str(e.value)
def test_patch_metadata_with_code_list():
    """Enumerated (code-list) metadata patches to the expected fixture."""
    original = load_file(f'{RESOURCE_DIR}/SYNT_BEFOLKNING_KJOENN_enumerated.json')
    updated = load_file(f'{RESOURCE_DIR}/SYNT_BEFOLKNING_KJOENN_enumerated_update.json')
    expected = load_file(f'{RESOURCE_DIR}/SYNT_BEFOLKNING_KJOENN_enumerated_patched.json')
    metadata = Metadata(original)
    metadata.patch(Metadata(updated))
    assert metadata.to_dict() == expected
def test_patch_metadata_without_code_list():
    """Described (no code-list) metadata patches to the expected fixture."""
    original = load_file(f'{RESOURCE_DIR}/SYNT_PERSON_INNTEKT_described.json')
    updated = load_file(f'{RESOURCE_DIR}/SYNT_PERSON_INNTEKT_described_update.json')
    expected = load_file(f'{RESOURCE_DIR}/SYNT_PERSON_INNTEKT_described_patched.json')
    metadata = Metadata(original)
    metadata.patch(Metadata(updated))
    assert metadata.to_dict() == expected
def test_patch_metadata_illegal_fields_changes():
    """
    The "updated" contains randomly chosen fields that are not allowed to be changed.
    """
    updated = load_file(f'{RESOURCE_DIR}/SYNT_BEFOLKNING_KJOENN_enumerated_illegal_update.json')
    original = load_file(f'{RESOURCE_DIR}/SYNT_BEFOLKNING_KJOENN_enumerated.json')
    with pytest.raises(PatchingError) as e:
        orig = Metadata(original)
        orig.patch(Metadata(updated))
    # BUG FIX: check the exception's own message via e.value, not the
    # ExceptionInfo wrapper's repr.
    assert 'Can not change these metadata fields [name, temporality, languageCode]' in str(e.value)
def load_file(file_name: str):
    """Read and return the JSON document stored at `file_name`."""
    with open(file_name) as handle:
        return json.load(handle)
| 4,316 | 1,605 |
# metrics/__init__.py
# author: Playinf
# email: playinf@stu.xmu.edu.cn
from .metrics import create_tagger_evaluation_metrics
| 127 | 48 |
import sys
import pytest
from briefcase.platforms.macOS.dmg import macOSDmgCreateCommand
# The DMG tooling only exists on macOS: skip this whole test module elsewhere.
if sys.platform != 'darwin':
    pytest.skip("requires macOS", allow_module_level=True)
def test_binary_path(first_app_config, tmp_path):
    """The app bundle lands under macOS/<formal name>/<formal name>.app."""
    command = macOSDmgCreateCommand(base_path=tmp_path)
    expected = tmp_path / 'macOS' / 'First App' / 'First App.app'
    assert command.binary_path(first_app_config) == expected
def test_distribution_path(first_app_config, tmp_path):
    """The distributable DMG is named <formal name>-<version>.dmg under macOS/."""
    command = macOSDmgCreateCommand(base_path=tmp_path)
    expected = tmp_path / 'macOS' / 'First App-0.0.1.dmg'
    assert command.distribution_path(first_app_config) == expected
| 680 | 237 |
from flask import Flask
app = Flask(__name__)
# Pick the config class from app.config["ENV"] (presumably populated from
# the FLASK_ENV environment variable — confirm against the Flask version in use).
if app.config["ENV"] == "production":
    app.config.from_object("config.ProductionConfig")
elif app.config["ENV"] == "testing":
    app.config.from_object("config.TestingConfig")
else:
    app.config.from_object("config.DevelopmentConfig")
# Imported at the bottom on purpose: these modules import `app` from this
# module, so a top-of-file import would create a circular import.
from app import views
from app import admin_views
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : MeUtils.
# @File : conf
# @Time : 2021/1/31 10:20 下午
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
from meutils.pipe import *
# Parameter definition
class TrainConf(BaseConfig):
    # Training hyper-parameters; defaults can be overridden from the CLI.
    epoch = 10
    batch_size = 128
def train(**kwargs):
    """Placeholder training routine: logs a start message and sleeps; kwargs are ignored."""
    logger.info("开始训练")
    time.sleep(3)
# Consume the parameters
def run(**kwargs):
    """Parse CLI kwargs into a TrainConf and launch train() with them."""
    logger.info(f"输入参数: {kwargs}")
    c = TrainConf.parse_obj(kwargs)
    logger.info(f"使用参数: {c.dict()}")
    train(**c.dict())
# Pass in the parameters from the command line
conf_cli = lambda: fire.Fire(run)  # <conf_cli> --epoch 11 --batch_size 111
# fire.Fire() needs an explicit command object
| 674 | 315 |
# Tutorial notes on the `requests` library.
# BUGFIX: the prose lines ("install Request module", "pip install requests",
# "https://httpbin.org") were bare text and made the file a SyntaxError;
# they are now comments.
#
# Install with: pip install requests
import requests

r = requests.get('https://xkcd.com/353/')
print(r)
print(r.text)

# Download image
r = requests.get('https://xkcd.com/comics/python.png')
print(r.content)
with open('comic.png', 'wb') as f:
    f.write(r.content)
print(r.status_code)
print(r.ok)  # Print True for any response <400
print(r.headers)

# Test endpoints live at https://httpbin.org

# How to pass query parameter
payload = {'page' : 2, 'count' :25}
r = requests.get('https://httpbin.org/get', params=payload)
print(r.text)

####### Post
payload = {'username' : 'madhu', 'password' :'testing'}
r = requests.post('https://httpbin.org/post', data=payload)
r_dict = r.json()
print(r_dict['form'])

## timeout
r = requests.get('https://xkcd.com/comics/python.png', timeout=3)
# if the request don't respond within 3 sec, timeout
| 888 | 346 |
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework_jwt.views import obtain_jwt_token
from rest_framework_jwt.views import refresh_jwt_token
from rest_framework_jwt.views import verify_jwt_token
# Configuration API Router
from rest_framework import routers
#router = routers.DefaultRouter()
#router.register(r'artists', ArtistViewSet)
#router.register(r'albums', AlbumViewSet)
#router.register(r'songs', SongViewSet)
# Project URL table: site index, admin, JWT auth endpoints, then app routers.
urlpatterns = [
    url(r'^', include('index.urls')),
    url(r'^admin/', admin.site.urls),
    #url(r'^api/', include(router.urls)),
    # AUTH
    url(r'^cuenta/', include('allauth.urls')),
    url(r'^api-token-auth/', obtain_jwt_token),
    url(r'^api-token-refresh/', refresh_jwt_token),
    url(r'^api-token-verify/', verify_jwt_token),
    # Apps
    url(r'^usuario/', include('usuario.urls')),
    url(r'^stock/', include('stock.urls')),
    url(r'^contabilidad/', include('contabilidad.urls')),
]
from django.conf import settings
from django.conf.urls.static import static
# Serve media/static files from Django itself only in DEBUG mode.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 1,231 | 430 |
import numpy as np
from scipy.optimize import curve_fit, minimize_scalar
# Physical constants in the eV / ps / K unit system used throughout this module.
h_planck = 4.135667662e-3  # eV/ps  (Planck constant)
h_planck_bar = 6.58211951e-4  # eV/ps  (reduced Planck constant)
kb_boltzmann = 8.6173324e-5  # eV/K   (Boltzmann constant)
def get_standard_errors_from_covariance(covariance):
    """Per-parameter standard errors: square roots of the covariance diagonal."""
    # return np.linalg.eigvals(covariance)
    #return np.sqrt(np.trace(covariance))
    return np.sqrt(np.diagonal(covariance))
class Lorentzian:
    """Fit a (symmetric) Lorentzian profile plus a constant base line to a
    power spectrum, and report the peak's properties."""

    def __init__(self,
                 test_frequencies_range,
                 power_spectrum,
                 guess_position=None,
                 guess_height=None):
        self.test_frequencies_range = test_frequencies_range
        self.power_spectrum = power_spectrum
        self.guess_pos = guess_position
        self.guess_height = guess_height
        self._fit_params = None        # cached curve_fit parameters
        self._fit_covariances = None   # cached covariance matrix
        self.curve_name = 'Lorentzian'

    def _function(self, x, a, b, c, d):
        """Lorentzian profile.
        x: frequency coordinate
        a: peak position
        b: half width
        c: area proportional parameter
        d: base line
        """
        reduced = (x - a) / b
        return c / (np.pi * b * (1.0 + reduced ** 2)) + d

    def get_fitting_parameters(self):
        """Run curve_fit once, cache and return (params, covariance)."""
        if self._fit_params is not None:
            return self._fit_params, self._fit_covariances
        have_guess = self.guess_pos is not None and self.guess_height is not None
        p0 = [self.guess_pos, 0.1, self.guess_height, 0.0] if have_guess else None
        params, covariances = curve_fit(self._function,
                                        self.test_frequencies_range,
                                        self.power_spectrum,
                                        p0=p0)
        self._fit_covariances = covariances
        self._fit_params = params
        return self._fit_params, self._fit_covariances

    def get_fitting(self):
        """Return a dict of peak properties, or {'all_good': False} if the
        fit fails or produces a NaN error estimate."""
        try:
            params, covariances = self.get_fitting_parameters()
            position, half_width, area, base_line = params
            standard_errors = get_standard_errors_from_covariance(covariances)
            global_error = np.average(standard_errors[:2]) / np.sqrt(area)
            if np.isnan(global_error):
                raise RuntimeError
            return {'maximum': area / (half_width * np.pi),
                    'width': 2.0 * half_width,
                    'peak_position': position,
                    'standard_errors': standard_errors,
                    'global_error': global_error,
                    'area': area,
                    'base_line': base_line,
                    'all_good': True}
        except RuntimeError:
            return {'all_good': False}

    def get_curve(self, frequency_range):
        """Evaluate the fitted Lorentzian over frequency_range."""
        params, _ = self.get_fitting_parameters()
        return self._function(frequency_range, *params)
class Lorentzian_asymmetric:
    """Fit an asymmetric Lorentzian profile: a Lorentzian whose half width
    varies sigmoidally across the peak."""

    def __init__(self,
                 test_frequencies_range,
                 power_spectrum,
                 guess_position=None,
                 guess_height=None):
        self.test_frequencies_range = test_frequencies_range
        self.power_spectrum = power_spectrum
        self.guess_pos = guess_position
        self.guess_height = guess_height
        self._fit_params = None        # cached curve_fit parameters
        self._fit_covariances = None   # cached covariance matrix
        self.curve_name = 'Assym. Lorentzian'

    def _g_a(self, x, a, b, s):
        """Asymmetric width term.
        x: frequency coordinate
        a: peak position
        b: half width
        s: asymmetry parameter
        """
        return 2*b/(1.0+np.exp(s*(x-a)))

    def _function(self, x, a, b, c, d, s):
        """Lorentzian asymmetric function.
        x: frequency coordinate
        a: peak position
        b: half width
        c: area proportional parameter
        d: base line
        s: asymmetry parameter
        """
        width = self._g_a(x, a, b, s)
        return c/(np.pi*width*(1.0+((x-a)/width)**2))+d

    def get_fitting_parameters(self):
        """Run curve_fit once, cache and return (params, covariance)."""
        if self._fit_params is not None:
            return self._fit_params, self._fit_covariances
        have_guess = self.guess_pos is not None and self.guess_height is not None
        p0 = [self.guess_pos, 0.1, self.guess_height, 0.0, 0.0] if have_guess else None
        params, covariances = curve_fit(self._function,
                                        self.test_frequencies_range,
                                        self.power_spectrum,
                                        p0=p0)
        self._fit_covariances = covariances
        self._fit_params = params
        return self._fit_params, self._fit_covariances

    def get_fitting(self):
        """Return a dict of peak properties, or {'all_good': False} on failure."""
        from scipy.integrate import quad
        try:
            params, covariances = self.get_fitting_parameters()
            # The maximum of the asymmetric profile is not exactly at
            # params[0]; locate it numerically within the sampled range.
            opt = minimize_scalar(lambda x: -self._function(x, *params), params[0],
                                  bounds=[self.test_frequencies_range[0],
                                          self.test_frequencies_range[-1]],
                                  method='bounded')
            frequency = opt["x"]
            area, error_integration = quad(self._function, 0, self.test_frequencies_range[-1],
                                           args=tuple(params),
                                           epsabs=1e-8)
            standard_errors = get_standard_errors_from_covariance(covariances)
            global_error = np.average(standard_errors[:2])/np.sqrt(area)
            if np.isnan(global_error):
                raise RuntimeError
            return {'maximum': -opt["fun"],
                    'width': 2.0 * self._g_a(frequency, params[0], params[1], params[4]),
                    'peak_position': frequency,
                    'global_error': global_error,
                    'area': area,
                    'base_line': params[3],
                    'asymmetry': params[4],
                    'all_good': True}
        except RuntimeError:
            return {'all_good': False}

    def get_curve(self, frequency_range):
        """Evaluate the fitted curve over frequency_range."""
        params, _ = self.get_fitting_parameters()
        return self._function(frequency_range, *params)
class Damped_harmonic:
    """Fit a damped-harmonic-oscillator power-spectrum profile plus a
    constant base line to a power spectrum."""

    def __init__(self,
                 test_frequencies_range,
                 power_spectrum,
                 guess_position=None,
                 guess_height=None):
        self.test_frequencies_range = test_frequencies_range
        self.power_spectrum = power_spectrum
        self.guess_pos = guess_position
        self.guess_height = guess_height
        self._fit_params = None        # cached curve_fit parameters
        self._fit_covariances = None   # cached covariance matrix
        self.curve_name = 'Damped Harm. Osc.'

    def _function(self, x, a, b, c, d):
        """Damped harmonic oscillator PS function
        x: frequency coordinate
        a: peak position
        b: half width
        c: area proportional parameter
        d: base line
        """
        return c/((a**2-x**2)**2 + (b*x)**2)+d

    def get_fitting_parameters(self):
        """Run curve_fit once, cache and return (params, covariance)."""
        if self._fit_params is None:
            if self.guess_pos is None or self.guess_height is None:
                fit_params, fit_covariances = curve_fit(self._function,
                                                        self.test_frequencies_range,
                                                        self.power_spectrum)
            else:
                fit_params, fit_covariances = curve_fit(self._function,
                                                        self.test_frequencies_range,
                                                        self.power_spectrum,
                                                        p0=[self.guess_pos, 0.1, self.guess_height, 0.0])
            self._fit_covariances = fit_covariances
            self._fit_params = fit_params
        return self._fit_params, self._fit_covariances

    def get_fitting(self):
        """Return a dict of peak properties, or {'all_good': False} on failure.

        Cleanup: the original computed `maximum = fit_params[2]/(width*np.pi)`
        and immediately overwrote it with the curve value at the peak (dead
        store), and redundantly re-assigned the already-cached fit params;
        both removed.
        """
        from scipy.integrate import quad
        try:
            fit_params, fit_covariances = self.get_fitting_parameters()
            width = abs(fit_params[1])
            frequency = fit_params[0]
            maximum = self.get_curve(frequency)  # curve height at the peak position
            area, error_integration = quad(self._function, 0, self.test_frequencies_range[-1],
                                           args=tuple(fit_params),
                                           epsabs=1e-8)
            standard_errors = get_standard_errors_from_covariance(fit_covariances)
            global_error = np.average(standard_errors[:2])/np.sqrt(area)
            if np.isnan(global_error):
                raise RuntimeError
            base_line = fit_params[3]
            return {'maximum': maximum,
                    'width': width,
                    'peak_position': frequency,
                    'global_error': global_error,
                    'area': area,
                    'base_line': base_line,
                    'all_good': True}
        except RuntimeError:
            return {'all_good': False}

    def get_curve(self, frequency_range):
        """Evaluate the fitted curve over frequency_range."""
        return self._function(frequency_range, *self.get_fitting_parameters()[0])
class Gaussian_function:
    """Fit a Gaussian profile plus a constant base line to a power spectrum."""

    def __init__(self,
                 test_frequencies_range,
                 power_spectrum,
                 guess_position=None,
                 guess_height=None):
        self.test_frequencies_range = test_frequencies_range
        self.power_spectrum = power_spectrum
        self.guess_pos = guess_position
        self.guess_height = guess_height
        self._fit_params = None        # cached curve_fit parameters
        self._fit_covariances = None   # cached covariance matrix
        self.curve_name = 'Gaussian dist.'

    def _function(self, x, a, b, c, d):
        """Gaussian PDF function
        x: coordinate
        a: peak position
        b: deviation (sigma)
        c: area proportional parameter
        d: base line

        BUGFIX: the normalization is 1/(b*sqrt(2*pi)). The previous form
        `c/b*np.sqrt(2*np.pi)` *multiplied* by sqrt(2*pi), so c was off from
        the documented area by a factor of 2*pi. The fitted curve family is
        unchanged (c is a free parameter that simply rescales), and the
        reported 'area' below is computed by numerical integration either way.
        """
        return c/(b*np.sqrt(2*np.pi))*np.exp(-(x-a)**2/(2*b**2))+d

    def get_fitting_parameters(self):
        """Run curve_fit once, cache and return (params, covariance)."""
        if self._fit_params is None:
            if self.guess_pos is None or self.guess_height is None:
                fit_params, fit_covariances = curve_fit(self._function,
                                                        self.test_frequencies_range,
                                                        self.power_spectrum)
            else:
                fit_params, fit_covariances = curve_fit(self._function,
                                                        self.test_frequencies_range,
                                                        self.power_spectrum,
                                                        p0=[self.guess_pos, 0.1, self.guess_height, 0.0])
            self._fit_covariances = fit_covariances
            self._fit_params = fit_params
        return self._fit_params, self._fit_covariances

    def get_fitting(self):
        """Return a dict of peak properties, or {'all_good': False} on failure."""
        from scipy.integrate import quad
        try:
            fit_params, fit_covariances = self.get_fitting_parameters()
            width = abs(fit_params[1])
            frequency = fit_params[0]
            maximum = self.get_curve(frequency)  # curve height at the peak position
            area, error_integration = quad(self._function, 0, self.test_frequencies_range[-1],
                                           args=tuple(fit_params),
                                           epsabs=1e-8)
            standard_errors = get_standard_errors_from_covariance(fit_covariances)
            global_error = np.average(standard_errors[:2])/np.sqrt(area)
            if np.isnan(global_error):
                raise RuntimeError
            base_line = fit_params[3]
            return {'maximum': maximum,
                    'width': width,
                    'peak_position': frequency,
                    'global_error': global_error,
                    'area': area,
                    'base_line': base_line,
                    'all_good': True}
        except RuntimeError:
            return {'all_good': False}

    def get_curve(self, frequency_range):
        """Evaluate the fitted curve over frequency_range."""
        return self._function(frequency_range, *self.get_fitting_parameters()[0])
# Registry of fitting models selectable by integer index.
# NOTE(review): Gaussian_function is defined above but not registered here —
# confirm whether that omission is intentional.
fitting_functions = {
    0: Lorentzian,
    1: Lorentzian_asymmetric,
    2: Damped_harmonic,
}
# Test for automatic detection (order can change)
# import sys, inspect
# list_fitting = inspect.getmembers(sys.modules[__name__], inspect.isclass)
# Fitting_functions = {}
# for i, p in enumerate(list_fitting):
# Fitting_functions[i] = p[1]
| 13,432 | 3,891 |
from __init__ import print_msg_box
class Node:
    """One element of a linked list: a payload and a pointer to the next node."""

    def __init__(self, dataValue=None):
        self.dataValue = dataValue  # stored payload
        self.next = None            # following node (None until linked)
class singleLinkedList:
    """A circular singly linked list: the last node points back to the head.

    Fixes over the original:
    * display() previously looped forever on any non-empty list (the circle
      never yields None); it now prints each element exactly once.
    * hint() built an explanation string and silently discarded it (and the
      string literal opened with a stray fourth quote); it now returns it.
    * insertLast() no longer raises AttributeError when called with no
      elements on an empty list.
    """

    def __init__(self):
        self.headValue = None  # first node of the circle
        self.temp = None       # cursor reused by the insert helpers

    def insertLast(self, *elements):
        """Append the given elements, then close the circle back to the head."""
        for data in elements:
            if self.headValue is None:
                self.headValue = Node(data)
                self.temp = self.headValue
            else:
                self.temp.next = Node(data)
                self.temp = self.temp.next
        if self.temp is not None:  # guard: nothing to link on an empty call
            self.temp.next = self.headValue

    def insertFirst(self, *elements):
        """Insert the given elements, in order, before the current head."""
        prevheadValue = self.headValue  # old chain (None if the list was empty)
        self.headValue = None
        for data in elements:
            if self.headValue is None:
                self.headValue = Node(data)
                self.temp = self.headValue
            else:
                self.temp.next = Node(data)
                self.temp = self.temp.next
        if prevheadValue is not None:
            # Re-attach the old chain and advance to its last node.
            self.temp.next = prevheadValue
            self.temp = self.temp.next
            while self.temp.next != prevheadValue:
                self.temp = self.temp.next
        self.temp.next = self.headValue

    def insertMiddle(self, arg1: "data", arg2: "position"):
        """Insert arg1 so it ends up at position arg2 (1-based).
        NOTE(review): assumes arg2 >= 2 and a non-empty list — confirm callers.
        """
        node = self.headValue
        for i in range(1, arg2-1):
            if node.next is None:
                return
            node = node.next
        prev = node.next
        node.next = Node(arg1)
        node = node.next
        node.next = prev
        # Walk to the last node and re-close the circle on the head.
        while node.next != self.headValue:
            node = node.next
        node.next = self.headValue

    def delete(self, position: "Position to be deleted"):
        """Unlink the node at the given 1-based position.
        NOTE(review): assumes position >= 2 and a non-empty list — confirm callers.
        """
        #[data|next] --> [data|next] --> [data|next] --> [data|next]
        #                     ^_______________^
        node = self.headValue
        for i in range(position-2):
            node = node.next
        node.next = node.next.next
        while node.next != self.headValue:
            node = node.next
        node.next = self.headValue

    def display(self):
        """Print every element once (the list is circular, so stop when the
        traversal comes back around to the head)."""
        if self.headValue is None:
            print("list is empty")
            return
        node = self.headValue
        while True:
            print(node.dataValue)
            node = node.next
            if node is self.headValue:
                break

    def hint(self):
        """Return a textual explanation of how this circular list works."""
        message = """
        Create a node class to have two variables
            1. Store data (datavalue)
            2. Next data address in last it is usually null in circular (next)
               linked list
        Create another class to perform manipulation in list
        Insert First:
            *To insert first element we need to have the data to whether any
             data exist before if so then we have to store it safely
            * Storing the data in headval
            * Taking previous value to set next value of another node
            * It repeats until it reaches the previous head value
            * Setting the last value to head node
        Insert last:
            *To insert last element we need to have the data to whether any
             data exist before if so then we have to store it safely
            * It repeats until it reaches the head value is occurred
            * Setting the last node next value to head node
        Insert Middle:
            *To insert middle element we need to have the data to whether any
             data exist before if so then we have to store it safely
            * Taking previous value to set next value of another node
            * It repeats until it reaches the previous head value
            * Setting the last next value to head node
        Display:
            Display walks the circle once and stops back at the head.
        """
        return message
#creating object
#list = singleLinkedList()
#list.insertLast(50, 60,70)
#list.display()
'''
It shows the entered things at last
output:
=======
50
60
70
50...
'''
#list.insertFirst(10,20,30)
#list.display()
'''
It shows the entered things at first then remaining
output:
=======
10
20
30
50
60
70
10...
'''
#print(list.insertMiddle.__annotations__)
#list.insertMiddle(40,4)
#list.display()
'''
It shows the inserted element at nth position
output:
=======
10
20
30
40
50
60
70
10...
'''
#list.delete(6)
#list.display()
'''
It shows the list after deleting it
output:
=======
10
20
30
40
50
60
10...
'''
| 4,517 | 1,275 |
# Generated by Django 2.2.14 on 2020-08-23 10:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional Item.paystack_link text field (max 80 chars).

    NOTE(review): presumably a Paystack payment-page URL — confirm with the
    Item model's usage.
    """

    dependencies = [
        ('core', '0006_referral_amount'),
    ]

    operations = [
        migrations.AddField(
            model_name='item',
            name='paystack_link',
            field=models.CharField(blank=True, max_length=80, null=True),
        ),
    ]
| 407 | 141 |
"""Decorators for auth module."""
from functools import wraps
from src.protocol import make_response
from src.database import session_scope
from .models import Session
def login_required(func):
    """Check that user is logged in based on the valid token exists in request."""
    @wraps(func)
    def wrapper(request, *args, **kwargs):
        # No token at all -> 401 (missing credentials).
        if 'token' not in request:
            return make_response(request, 401, 'Valid authentication credentials lack')
        with session_scope() as db:
            found = db.query(Session).filter_by(token=request.get('token')).first()
            # Unknown token, or a session that was explicitly closed -> 403.
            if found is None or found.closed:
                return make_response(request, 403, 'Access denied')
        return func(request, *args, **kwargs)
    return wrapper
| 798 | 218 |
# Name:
# Date:
# proj05: functions and lists
# Part I
def divisors(num):
    """
    Takes a number and returns all divisors of the number, ordered least to greatest
    :param num: int
    :return: list (int)
    """
    # Comprehension replaces the manual while/append loop; for num <= 0 the
    # range is empty and [] is returned, matching the original behaviour.
    return [d for d in range(1, num + 1) if num % d == 0]
def prime(num):
    """
    Takes a number and returns True if the number is prime, otherwise False
    :param num: int
    :return: bool
    """
    # Trial division up to sqrt(num): O(sqrt(n)) instead of enumerating every
    # divisor via divisors(num). Numbers below 2 are not prime (matches the
    # original, which found fewer than two divisors for them).
    if num < 2:
        return False
    return all(num % d for d in range(2, int(num ** 0.5) + 1))
# Part II:
# REVIEW: Conditionals, for loops, lists, and functions
#
# INSTRUCTIONS:
#
# 1. Make the string "sentence_string" into a list called "sentence_list" sentence_list
# should be a list of each letter in the string: ['H', 'e', 'l', 'l', 'o', ',', ' ', 'm',
# 'y', ' ', 'n', 'a', 'm', 'e', ' ', 'i', 's', ' ', 'M', 'o', 'n', 't', 'y', ' ', 'P',
# 'y', 't', 'h', 'o', 'n', '.']
#
# Hint: Use a for loop and with an append function: list.append(letter)
#
# sentence_string = "Hello, my name is Monty Python."
# sentencelist = []
# counter = 0
# for item in sentence_string:
# letter = sentence_string[counter]
# sentencelist.append(letter)
# counter = counter + 1
# print sentencelist
# 2. Print every item of sentence_list on a separate line using a for loop, like this:
# H
# e
# l
# l
# o
# ,
#
# m
# y
# .... keeps going on from here.
# 3: Write a for loop that goes through each letter in the list vowels. If the current
# letter is 'b', print out the index of the current letter (should print out the
# number 1).
# vowels = ['a', 'b', 'i', 'o', 'u', 'y']
# counter = 0
# while counter <= len(vowels):
# if vowels[counter] == "b":
# break
# else:
# counter = counter + 1
# print counter
# 4: use the index found to change the list vowels so that the b is replaced with an e.
# for letter in vowels:
# vowels[1]="e"
# print vowels
# 5: Loop through each letter in the sentence_string. For each letter, check to see if the
# number is in the vowels list. If the letter is in the vowels list, add one to a
# counter. Print out the counter at the end of the loop. This counter should show how
# many vowels are in sentence_string.
# counter = 0
# for letter in sentence_string:
# if letter in vowels:
# counter = counter + 1
# print counter
# 6: Make a new function called "vowelFinder" that will return a list of the vowels
# found in a list (no duplicates).The function's parameters should be "list" and "vowels."
vowels = ['a', 'e', 'i', 'o', 'u', 'y']

def vowelFinder(list, vowels):
    """Return the sorted, de-duplicated vowels found in `list`.

    :param list: sequence of single-character strings (name kept for
        backward compatibility even though it shadows the builtin)
    :param vowels: vowels to look for
    :return: sorted list of distinct vowels present
    """
    # A set intersection replaces the original's three accumulator lists
    # and manual counter, with identical results.
    return sorted(set(letter for letter in list if letter in vowels))
# Demo: Python-3-compatible print call (the original used the Python 2
# `print` statement, a SyntaxError under Python 3).
sentence = ["H", "e", "l", "l", "o", "h", "o", "w", "a", "r", "e", "y", "o", "u"]
print(vowelFinder(sentence, vowels))
# Example:
# vowelList = vowelFinder(sentence_list, vowels)
# print vowelList
# ['a', 'e', 'i', 'o', 'y']
# def vowelFinder(sentence_list, vowels):
# return [] | 3,607 | 1,235 |
# Copyright 2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
==============
BQPlot Package
==============
.. currentmodule:: bqplot
Each plot starts with a `Figure` object. A `Figure` has a number of `Axis` objects (horizontal and vertical) and a number of `Mark` objects. Each `Mark` object is a visual representation of the data. Each `Axis` and `Mark` has a `Scale` object. The `Scale` objects transform data into a visual property (typically a location in pixel space, but could be a color, etc.). An `Axis` draws an axis associated with the scale. ::
from bqplot import *
from IPython.display import display
x_data = range(10)
y_data = [i ** 2 for i in x_data]
x_sc = LinearScale()
y_sc = LinearScale()
ax_x = Axis(label='Test X', scale=x_sc, tick_format='0.0f')
ax_y = Axis(label='Test Y', scale=y_sc, orientation='vertical', tick_format='0.2f')
line = Lines(x=x_data,
y=y_data,
scales={'x':x_sc, 'y':y_sc},
colors=['red', 'yellow'])
fig = Figure(axes=[ax_x, ax_y], marks=[line])
display(fig)
.. automodule:: bqplot.figure
.. automodule:: bqplot.scales
.. automodule:: bqplot.marks
.. automodule:: bqplot.axes
.. automodule:: bqplot.market_map
.. automodule:: bqplot.interacts
.. automodule:: bqplot.traits
.. automodule:: bqplot.map
.. automodule:: bqplot.pyplot
"""
from .figure import *
from .axes import *
from .marks import *
from .scales import *
from .default_tooltip import *
| 2,034 | 674 |
'''
Une case est definie par sa couleur,
ses coordonnees et un acces a la grille
pour recuperer ses voisins
'''
class Case():
    """A grid cell defined by its colour, its (x, y) coordinates, and a
    reference to the grid so it can look up its neighbours."""

    def __init__(self, x, y, couleur, grille):
        self._x = x
        self._y = y
        self._couleur = couleur
        self._grille = grille
        self.GREY = "grey"  # colour marking a destroyed cell

    def getCouleur(self):
        """Current colour of the cell."""
        return self._couleur

    def getCouleurOriginale(self):
        """Colour with any '...3' (default-shade) suffix mapped back to '...2'."""
        return self._couleur[:-1] + "2" if self._couleur[-1] == "3" else self._couleur

    def getX(self):
        return self._x

    def getY(self):
        return self._y

    def setX(self, x):
        self._x = x

    def setY(self, y):
        self._y = y

    def setCouleur(self, couleur):
        self._couleur = couleur

    def getNord(self):
        """Cell above, or None on the top row."""
        return self._grille[self._y - 1][self._x] if self._y > 0 else None

    def getSud(self):
        """Cell below, or None on the bottom row."""
        return self._grille[self._y + 1][self._x] if self._y < len(self._grille) - 1 else None

    def getEst(self):
        """Cell to the right, or None on the last column."""
        return self._grille[self._y][self._x + 1] if self._x < len(self._grille) - 1 else None

    def getOuest(self):
        """Cell to the left, or None on the first column."""
        return self._grille[self._y][self._x - 1] if self._x > 0 else None

    def getVoisins(self):
        """The four neighbours in N, S, E, W order (None where out of bounds)."""
        return [self.getNord(), self.getSud(), self.getEst(), self.getOuest()]

    def getGrille(self):
        return self._grille

    def surbrillance(self):
        """Switch to the highlighted shade (suffix '2')."""
        self._couleur = self._couleur[:-1] + "2"

    def couleurParDefaut(self):
        """Switch to the default shade (suffix '3')."""
        self._couleur = self._couleur[:-1] + "3"

    def detruire(self):
        """Mark the cell as destroyed by colouring it grey."""
        self.setCouleur(self.GREY)

    def estDetruite(self):
        """True once the cell has been destroyed."""
        return self._couleur == self.GREY
#import Libraries
import cv2
import sys
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
##################################################
'''
This example illustrates how to extract interesting key points
as features from an image
Usage:
keypointsSIFTDescriptor.py [<image_name>]
image argument defaults to fruits.jpg
'''
#Read from input: first CLI argument, else the bundled default image
try:
    fn = sys.argv[1]
except IndexError:
    fn = "img/home.jpg"
##################################################
#Read image and plot it
img_original = mpimg.imread(fn)
img = mpimg.imread(fn)
plt.subplot(121), plt.imshow(img)
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
#grayscale it
# NOTE(review): mpimg.imread returns RGB, but COLOR_BGR2GRAY assumes BGR
# channel order — the grayscale weights are swapped; confirm intent.
gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
##################################################
#use SIFT descriptor for image key points feature extraction
# (cv2.xfeatures2d requires an opencv-contrib build)
sift = cv2.xfeatures2d.SIFT_create()
(kps, sift) = sift.detectAndCompute(gray, None)
##################################################
#draw the keypoints (rich flag draws size and orientation circles)
img = cv2.drawKeypoints(gray,kps,None,None,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
plt.subplot(122), plt.imshow(img)
plt.title('Image with extracted keypoints'), plt.xticks([]), plt.yticks([])
plt.show()
##################################################
import xml.etree.ElementTree as ET
from .baseVmWareXmlResponse import BaseVmWareXmlResponse
class GetHostInfoResponse(BaseVmWareXmlResponse):
    """Parsed RetrievePropertiesEx SOAP response exposing host hardware facts:
    vendor, model, vCPU (thread) count and memory size."""

    def __str__(self):
        return ('GetHostInfoResponse[vendor={} model={} vCPUs={} memory={}]').format(
            self.vendor, self.model, self.vCPUs, self.memory)

    def toDict(self):
        # Plain-dict view of the parsed fields (e.g. for serialisation).
        return dict(vendor=self.vendor, model=self.model, vCPUs=self.vCPUs, memory=self.memory)

    def __init__(self, response):
        """Parse the raw XML response string; raises ValueError when the
        expected 'summary.hardware' property set is absent."""
        data = ET.fromstring(response)
        # Navigate the SOAP envelope down to the returned property objects.
        innerData = self.getSubTreeByTree(
            data, ['Body', 'RetrievePropertiesExResponse', 'returnval', 'objects'])
        dataSet = self.findPropertySetValue(innerData,'summary.hardware',False)
        if dataSet is None:
            # Dump the raw payload to aid debugging before bailing out.
            print(response)
            raise ValueError('no know response data found')
        self.vendor = self.getSubTree(dataSet,'vendor').text
        self.model = self.getSubTree(dataSet,'model').text
        # numCpuThreads counts hardware threads, reported here as vCPUs.
        self.vCPUs = int(self.getSubTree(dataSet,'numCpuThreads').text)
        self.memory = int(self.getSubTree(dataSet,'memorySize').text)
| 1,103 | 338 |
import sys
import logging
# Python <= 2.6 lacks Logger.getChild; backfill an equivalent so the rest of
# the module can call logger.getChild() uniformly.
if sys.version_info[:2] <= (2, 6):
    logging.Logger.getChild = lambda self, suffix:\
        self.manager.getLogger('.'.join((self.name, suffix)) if self.root is not self else suffix)
import pytest
from chatexchange.markdown_detector import markdown
logger = logging.getLogger(__name__)
def test_markdown():
    """Spot-check markdown(): returns a non-None rendering for text the chat
    renderer would format, and None for plain text (including the renderer's
    known corner cases)."""
    assert markdown('no markdown here') is None
    assert markdown('    code formatting') is not None
    assert markdown('hello `code` here') is not None
    assert markdown('bare url https://example.com/link gets linked') is not None
    assert markdown('[hello](http://example.com/hello)') is not None
    assert markdown('adjacent[hello](http://example.com/hello)text') is not None
    assert markdown('adjacent.[hello](https://example.com/hello).x') is not None
    assert markdown('[ftp](ftp://example.com/link) works too') is not None
    assert markdown('text with *italics*') is not None
    assert markdown('and **bold** too') is not None
    assert markdown('*not italics') is None
    assert markdown('**not bold either') is None
    assert markdown('***not both neither too also as well') is None
    assert markdown('****not bold or italic') is None
    # Odd corner cases: many backticks
    assert markdown('two ``backticks`` code') is not None
    assert markdown('unpaired `single double`` fail') is None
    assert markdown('unpaired `single triple``` fail') is None
    # Weirdly, 'unpaired ``double triple```' gets rendered as
    # 'unpaired <code>double triple</code>`'
    #assert markdown('unpaired ``double triple``` fail') is not None
    assert markdown('``````````````````18 ticks``````````````````') is not None
    # Odd corner cases: broken links
    assert markdown(
        '[](http://example.com/link) gets linked inside parens') is not None
    assert markdown('[no link]() is not linked') is None
    assert markdown('[mailto is not linked](mailto:self@example.com)') is None
    assert markdown('[sftp](sftp://example.com/link) is not linked') is None
    assert markdown('[ftps](ftps://example.com/link) is not linked') is None
    assert markdown(
        '[https://example.com/no-link]() link in square brackets') is not None
    assert markdown(
        'empty anchor, link in parens [](https://example.com/)') is not None
    # Odd corner cases: mixed bold and italics
    assert markdown('this is ***bold italics***') is not None
    assert markdown('this is **bold and *italics* too**') is not None
    assert markdown(
        'this is *italics and **bold** and **more** too*') is not None
    # Odd corner cases: broken bold or italics
    assert markdown('**unpaired neither*') is None
    assert markdown('*unpaired nor**') is None
    assert markdown('***unpaired** in the end') is None
    # chat actually briefly formats as bold italics, then reverts
    #assert markdown('****this is weird****') is None
| 2,900 | 857 |
# BUGFIX: both comprehensions had a stray ']' after enumerate(...), closing
# the list before the 'if' clause — a SyntaxError. The filter belongs inside
# the comprehension.
# Indices where each address list matches the address being searched for.
x_indexes = [i for i, j in enumerate(xaddr) if j == myxaddr]
y_indexes = [i for i, j in enumerate(yaddr) if j == myyaddr]
print('x_indexes: ' + str(x_indexes))
print('y_indexes:' + str(y_indexes))
# keep common indexes
# NOTE(review): zip compares the lists pairwise by position, not as sets;
# if "common" means indices present in both lists, this should be
# sorted(set(x_indexes) & set(y_indexes)) — confirm intent before changing.
common = [i for i, j in zip(x_indexes, y_indexes) if i == j]
print('common: ' + str(common))
| 313 | 127 |
import os
from typing import Optional, List
from fastapi import APIRouter, Request, Response, status, Depends
from pyefriend_api.models.setting import Setting as SettingModel
from pyefriend_api.app.auth import login_required
from .schema import SettingOrm, SettingUpdate
# Router for all /setting endpoints.
r = APIRouter(prefix='/setting',
              tags=['setting'])

@r.get('/', response_model=List[SettingOrm])
async def get_settings(user=Depends(login_required)):
    """### List every configurable setting value"""
    return [SettingOrm.from_orm(item) for item in SettingModel.list()]
@r.post('/', status_code=status.HTTP_200_OK)
async def initialize_settings(user=Depends(login_required)):
    """
    ### Reset settings to their defaults
    - initializes with first=False (presumably: also overwrite existing
      values rather than only seeding missing ones — confirm against
      SettingModel.initialize)
    """
    SettingModel.initialize(first=False)
    return Response('Success', status_code=status.HTTP_200_OK)
@r.get('/{section}/{key}', response_model=SettingOrm)
async def get_a_setting(section: str, key: str, user=Depends(login_required)):
    """
    ### Look up a single setting value
    - section: section of the setting table to search
    - key: key within that section
    """
    return SettingOrm.from_orm(SettingModel.get(section=section, key=key))
@r.put('/{section}/{key}', status_code=status.HTTP_200_OK)
async def change_setting(section: str,
                         key: str,
                         request: SettingUpdate,
                         user=Depends(login_required)):
    """
    ### Update a setting value
    - section: section of the setting table to search
    - key: key within that section
    """
    SettingModel.update(section=section,
                        key=key,
                        value=request.value)
    return Response('Success', status_code=status.HTTP_200_OK)
| 1,651 | 601 |
"""Unit tests for util/multi_analysis.py"""
import os
import unittest
from dsi.multi_analysis import MultiEvergreenAnalysis, main
from test_lib.fixture_files import FixtureFiles
from test_lib.test_requests_parent import TestRequestsParent
FIXTURE_FILES = FixtureFiles()
class TestMultiEvergreenAnalysis(TestRequestsParent):
"""
Test the MultiEvergreen client class.
"""
    def test_parse_options(self):
        """MultiEvergreenAnalysis: parse options."""
        # Two bare build IDs plus an explicit evergreen config file should
        # produce the default output flags (csv on, everything else off).
        expected = {
            "evergreen_config": FIXTURE_FILES.repo_root_file_path("config.yml"),
            "csv": True,
            "json": False,
            "json_array": False,
            "ycsbfix": False,
            "yml": False,
            "id": ["587773af3ff120ab9000946", "587773b03ff1220ab900094a"],
        }
        args = [
            "587773af3ff120ab9000946",
            "587773b03ff1220ab900094a",
            "--evergreen-config",
            FIXTURE_FILES.repo_root_file_path("config.yml"),
        ]
        client = MultiEvergreenAnalysis(args)
        client.parse_options()
        self.assertEqual(client.config, expected)
def test_parse_options2(self):
"""MultiEvergreenAnalysis: parse more advanced options."""
input_file = FIXTURE_FILES.fixture_file_path("multi_patch_builds.yml")
expected_config = {
"evergreen_config": FIXTURE_FILES.repo_root_file_path("config.yml"),
"csv": False,
"json": True,
"json_array": False,
"ycsbfix": False,
"yml": False,
"out": "outfile.json",
"id": [],
"continue": input_file,
}
args = [
"--json",
"--out",
"outfile.json",
"--continue",
input_file,
"--evergreen-config",
FIXTURE_FILES.repo_root_file_path("config.yml"),
]
client = MultiEvergreenAnalysis(args)
client.parse_options()
self.assertEqual(client.config, expected_config)
self.assertEqual(client.builds[1]["ID"], "5873a2623ff1224e8e0003ee")
def test_aggregate_results(self):
"""MultiEvergreenAnalysis.aggregate_results()"""
data = [
{
"a_variant": {
"a_task": {
"data": {
"results": [
{
"name": "a_test",
"results": {
"32": {
"ops_per_sec": 111.123,
"ops_per_sec_values": [111.123, 123.111, 234.123],
},
"64": {
"ops_per_sec": 222.234,
"ops_per_sec_values": [222.234, 333.123, 444.111],
},
},
}
]
}
}
}
},
{
"a_variant": {
"a_task": {
"data": {
"results": [
{
"name": "a_test",
"results": {
"32": {
"ops_per_sec": 123,
"ops_per_sec_values": [123.123, 234.234, 345.345],
},
"64": {
"ops_per_sec": 234,
"ops_per_sec_values": [234.234, 345.345, 456.456],
},
},
}
]
}
}
}
},
]
expected = {
"a_variant": {
"a_task": {
"a_test": {
32: {
"all_variance_to_mean": 44.10677573939485,
"it_range_to_median": [0.999098374637522, 0.9487179487179488],
"it_range_to_median_avg": 0.97390816167773542,
"it_range_to_median_max": 0.999098374637522,
"it_range_to_median_min": 0.9487179487179488,
"it_max": [234.123, 345.345],
"it_variance": [4599.396047999999, 12345.654321000002],
"it_range": [122.99999999999999, 222.22200000000004],
"all_min": 111.123,
"all_median": 178.623,
"it_median": [123.111, 234.234],
"min": 111.123,
"all_variance": 8608.6061151,
"all_range_to_median": 1.3112645068104334,
"max": 123,
"all_range": 234.22200000000004,
"variance": 70.53156449999994,
"variance_to_mean": 0.6025171768685686,
"it_min": [111.123, 123.123],
"all_max": 345.345,
"it_variance_to_mean": [29.46083467098815, 52.706500000000005],
"average": 117.0615,
"median": 117.0615,
"ops_per_sec": [111.123, 123],
"ops_per_sec_values": [
[111.123, 123.111, 234.123],
[123.123, 234.234, 345.345],
],
"range": 11.876999999999995,
"it_average": [156.119, 234.234],
"range_to_median": 0.10145948924283386,
"all_average": 195.17650000000003,
},
64: {
"all_variance_to_mean": 29.198995681067522,
"it_range_to_median": [0.666051278356643, 0.6434782608695652],
"it_range_to_median_avg": 0.65476476961310404,
"it_range_to_median_max": 0.666051278356643,
"it_range_to_median_min": 0.6434782608695652,
"it_max": [444.111, 456.456],
"it_variance": [12307.351598999998, 12345.654321000002],
"it_range": [221.87699999999998, 222.222],
"all_min": 222.234,
"all_median": 339.23400000000004,
"it_median": [333.123, 345.345],
"min": 222.234,
"all_variance": 9905.773884299999,
"all_range_to_median": 0.6904437644811545,
"max": 234,
"all_range": 234.222,
"variance": 69.21937799999989,
"variance_to_mean": 0.30343805152619,
"it_min": [222.234, 234.234],
"all_max": 456.456,
"it_variance_to_mean": [36.9417077855419, 35.74875652173913],
"average": 228.11700000000002,
"median": 228.11700000000002,
"ops_per_sec": [222.234, 234],
"ops_per_sec_values": [
[222.234, 333.123, 444.111],
[234.234, 345.345, 456.456],
],
"range": 11.765999999999991,
"it_average": [333.156, 345.345],
"range_to_median": 0.05157879509199222,
"all_average": 339.25050000000005,
},
}
}
}
}
client = MultiEvergreenAnalysis()
client.results = data
client.aggregate_results()
self.assertEqual(client.agg_results, expected)
def test_flatten(self):
"""MultiEvergreenAnalysis.flat_results()"""
client = MultiEvergreenAnalysis()
client.agg_results = {
"a_variant": {
"a_task": {
"a_test": {
32: {
"all_variance_to_mean": 44.10677573939485,
"it_range_to_median": [0.999098374637522, 0.9487179487179488],
"it_range_to_median_avg": 0.97390816167773542,
"it_range_to_median_max": 0.999098374637522,
"it_range_to_median_min": 0.9487179487179488,
"it_max": [234.123, 345.345],
"it_variance": [4599.396047999999, 12345.654321000002],
"it_range": [122.99999999999999, 222.22200000000004],
"all_min": 111.123,
"all_median": 178.623,
"it_median": [123.111, 234.234],
"min": 111.123,
"all_variance": 8608.6061151,
"all_range_to_median": 1.3112645068104334,
"max": 123,
"all_range": 234.22200000000004,
"variance": 70.53156449999994,
"variance_to_mean": 0.6025171768685686,
"it_min": [111.123, 123.123],
"all_max": 345.345,
"it_variance_to_mean": [29.46083467098815, 52.706500000000005],
"average": 117.0615,
"median": 117.0615,
"ops_per_sec": [111.123, 123],
"ops_per_sec_values": [
[111.123, 123.111, 234.123],
[123.123, 234.234, 345.345],
],
"range": 11.876999999999995,
"it_average": [156.119, 234.234],
"range_to_median": 0.10145948924283386,
"all_average": 195.17650000000003,
},
64: {
"all_variance_to_mean": 29.198995681067522,
"it_range_to_median": [0.666051278356643, 0.6434782608695652],
"it_range_to_median_avg": 0.65476476961310404,
"it_range_to_median_max": 0.666051278356643,
"it_range_to_median_min": 0.6434782608695652,
"it_max": [444.111, 456.456],
"it_variance": [12307.351598999998, 12345.654321000002],
"it_range": [221.87699999999998, 222.222],
"all_min": 222.234,
"all_median": 339.23400000000004,
"it_median": [333.123, 345.345],
"min": 222.234,
"all_variance": 9905.773884299999,
"all_range_to_median": 0.6904437644811545,
"max": 234,
"all_range": 234.222,
"variance": 69.21937799999989,
"variance_to_mean": 0.30343805152619,
"it_min": [222.234, 234.234],
"all_max": 456.456,
"it_variance_to_mean": [36.9417077855419, 35.74875652173913],
"average": 228.11700000000002,
"median": 228.11700000000002,
"ops_per_sec": [222.234, 234],
"ops_per_sec_values": [
[222.234, 333.123, 444.111],
[234.234, 345.345, 456.456],
],
"range": 11.765999999999991,
"it_average": [333.156, 345.345],
"range_to_median": 0.05157879509199222,
"all_average": 339.25050000000005,
},
}
}
}
}
expected = [
{
"thread_level": 32,
"variant_name": "a_variant",
"task_name": "a_task",
"test_name": "a_test",
"added_label": "added_label",
"all_variance_to_mean": 44.10677573939485,
"it_range_to_median": [0.999098374637522, 0.9487179487179488],
"it_range_to_median_avg": 0.97390816167773542,
"it_range_to_median_max": 0.999098374637522,
"it_range_to_median_min": 0.9487179487179488,
"it_max": [234.123, 345.345],
"it_variance": [4599.396047999999, 12345.654321000002],
"it_range": [122.99999999999999, 222.22200000000004],
"all_min": 111.123,
"all_median": 178.623,
"it_median": [123.111, 234.234],
"min": 111.123,
"all_variance": 8608.6061151,
"all_range_to_median": 1.3112645068104334,
"max": 123,
"all_range": 234.22200000000004,
"variance": 70.53156449999994,
"variance_to_mean": 0.6025171768685686,
"it_min": [111.123, 123.123],
"all_max": 345.345,
"it_variance_to_mean": [29.46083467098815, 52.706500000000005],
"average": 117.0615,
"median": 117.0615,
"ops_per_sec": [111.123, 123],
"ops_per_sec_values": [[111.123, 123.111, 234.123], [123.123, 234.234, 345.345]],
"range": 11.876999999999995,
"it_average": [156.119, 234.234],
"range_to_median": 0.10145948924283386,
"all_average": 195.17650000000003,
},
{
"thread_level": 64,
"variant_name": "a_variant",
"task_name": "a_task",
"test_name": "a_test",
"added_label": "added_label",
"all_variance_to_mean": 29.198995681067522,
"it_range_to_median": [0.666051278356643, 0.6434782608695652],
"it_range_to_median_avg": 0.65476476961310404,
"it_range_to_median_max": 0.666051278356643,
"it_range_to_median_min": 0.6434782608695652,
"it_max": [444.111, 456.456],
"it_variance": [12307.351598999998, 12345.654321000002],
"it_range": [221.87699999999998, 222.222],
"all_min": 222.234,
"all_median": 339.23400000000004,
"it_median": [333.123, 345.345],
"min": 222.234,
"all_variance": 9905.773884299999,
"all_range_to_median": 0.6904437644811545,
"max": 234,
"all_range": 234.222,
"variance": 69.21937799999989,
"variance_to_mean": 0.30343805152619,
"it_min": [222.234, 234.234],
"all_max": 456.456,
"it_variance_to_mean": [36.9417077855419, 35.74875652173913],
"average": 228.11700000000002,
"median": 228.11700000000002,
"ops_per_sec": [222.234, 234],
"ops_per_sec_values": [[222.234, 333.123, 444.111], [234.234, 345.345, 456.456]],
"range": 11.765999999999991,
"it_average": [333.156, 345.345],
"range_to_median": 0.05157879509199222,
"all_average": 339.25050000000005,
},
]
flat_results = client.flat_results({"added_label": "added_label"})
self.assertEqual(flat_results, expected)
def test_ycsb_fix(self):
"""Test MultiEvergreenAnalysis._ycsb_fix()"""
client = MultiEvergreenAnalysis()
client.results = [
{
"a_variant": {
"a_task": {
"build_id": "a_build_id",
"create_time": "2017-04-05T20:14:53.193Z",
"data": {
"results": [
{
"end": 1491482988,
"name": "ycsb_load-wiredTiger",
"results": {"32": {"ops_per_sec": 50915.97845235792}},
"start": 1491482887,
"workload": "ycsb",
},
{
"end": 1491483185,
"name": "ycsb_load-wiredTiger",
"results": {"32": {"ops_per_sec": 50418.98173824482}},
"start": 1491483084,
"workload": "ycsb",
},
]
},
}
}
}
]
client._ycsb_fix()
print(client.results)
expected_results = [
{
"a_variant": {
"a_task": {
"build_id": "a_build_id",
"create_time": "2017-04-05T20:14:53.193Z",
"data": {
"results": [
{
"end": 1491482988,
"name": "ycsb_load-wiredTiger",
"results": {
"32": {
"ops_per_sec": 50667.480095301369,
"ops_per_sec_values": [
50915.97845235792,
50418.98173824482,
],
}
},
"start": 1491482887,
"workload": "ycsb",
}
]
},
}
}
}
]
self.assertEqual(client.results, expected_results)
def test_main(self):
"""MultiEvergreenAnalysis: Fetch real Evergreen results and write output files."""
evergreen_config = FIXTURE_FILES.repo_root_file_path("config.yml")
args = [
"--evergreen-config",
evergreen_config,
"--json",
"--out",
"test_outfile.json",
"587773af3ff1220ab9000946",
"587773b03ff1220ab900094a",
]
main(args)
# Intentionally not checking output files, just testing that we run without exceptions.
os.remove("test_outfile.json")
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| 20,031 | 7,796 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-08 03:42
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the 'tratamientos' app.
    # Historic migrations should not be hand-edited once applied.

    initial = True

    # Requires the Paciente model from the 'pacientes' app.
    dependencies = [
        ('pacientes', '0001_initial'),
    ]

    operations = [
        # Consultation reason: patient-reported symptoms plus the medical and
        # kinesiology assessments.
        migrations.CreateModel(
            name='MotivoConsulta',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creado_el', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')),
                ('modificado_el', models.DateTimeField(auto_now=True, verbose_name='Fecha de modificación')),
                ('motivo_consulta_paciente', models.TextField(blank=True, default='', help_text='Signos y síntomas explicados por el paciente.', verbose_name='motivo según el paciente')),
                ('diagnostico_medico', models.TextField(help_text='Diagnóstico que elaboró el médico especialista.', verbose_name='diagnóstico médico')),
                ('evaluacion_kinesica', models.TextField(help_text='Evaluación elaborada por el kinesiólogo/a.', verbose_name='evaluación kinésica')),
                ('tratamientos_previos', models.TextField(blank=True, help_text='Descripción de tratamientos previos por el mismo motivo de consulta', verbose_name='tratamientos previos')),
                ('observaciones', models.TextField(blank=True, verbose_name='observaciones')),
                ('paciente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='motivos_de_consulta', to='pacientes.Paciente', verbose_name='paciente')),
            ],
            options={
                'verbose_name': 'motivo de consulta',
                'verbose_name_plural': 'motivos de consulta',
            },
        ),
        # Treatment objective linked to a consultation reason.
        migrations.CreateModel(
            name='Objetivo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creado_el', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')),
                ('modificado_el', models.DateTimeField(auto_now=True, verbose_name='Fecha de modificación')),
                ('descripcion', models.CharField(max_length=255, verbose_name='descripción')),
                ('fecha_inicio', models.DateField(null=True, verbose_name='fecha de inicio')),
                ('fecha_cumplido', models.DateField(null=True, verbose_name='fecha de éxito')),
                ('observaciones', models.TextField(blank=True, verbose_name='observaciones')),
                ('motivo_consulta', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='objetivos', to='tratamientos.MotivoConsulta', verbose_name='motivo de consulta')),
            ],
            options={
                'verbose_name': 'objectivo',
                'verbose_name_plural': 'objetivos',
            },
        ),
        # Treatment plan: session count, weekly frequency (1-7) and lifecycle state.
        migrations.CreateModel(
            name='Planificacion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creado_el', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')),
                ('modificado_el', models.DateTimeField(auto_now=True, verbose_name='Fecha de modificación')),
                ('fecha_ingreso', models.DateField(auto_now_add=True, verbose_name='fecha de ingreso')),
                ('fecha_alta', models.DateField(blank=True, help_text='fecha de alta tentativa.', null=True, verbose_name='fecha de alta')),
                ('cantidad_sesiones', models.IntegerField(default=10, help_text='Cantidad de sesiones necesarias recetadas por el médico.', verbose_name='cantidad de sesiones')),
                ('frecuencia', models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(7)], verbose_name='frecuencia semanal')),
                ('estado', models.IntegerField(choices=[(1, 'Planificado'), (2, 'En curso'), (3, 'Finalizado'), (4, 'Cancelado')], default=1, verbose_name='estado')),
                ('comentarios', models.TextField(blank=True, null=True, verbose_name='comentarios')),
                ('conclusion', models.TextField(blank=True, null=True, verbose_name='conclusión')),
                ('motivo_consulta', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='planificaciones', to='tratamientos.MotivoConsulta')),
            ],
            options={
                'verbose_name': 'planificación',
                'verbose_name_plural': 'planificaciones',
            },
        ),
        # Individual treatment session, tied to a patient and (optionally) a reason.
        migrations.CreateModel(
            name='Sesion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creado_el', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')),
                ('modificado_el', models.DateTimeField(auto_now=True, verbose_name='Fecha de modificación')),
                ('fecha', models.DateField(verbose_name='fecha')),
                ('duracion', models.PositiveSmallIntegerField(default=60, help_text='Duración de la sesión en minutos', verbose_name='duración de la sesión')),
                ('estado_paciente', models.TextField(blank=True, help_text='Descripción de cómo se siente el paciente antes de la sesión', null=True, verbose_name='estado de paciente')),
                ('actividad', models.TextField(blank=True, null=True, verbose_name='actividades de la sesión')),
                ('comentarios', models.TextField(blank=True, null=True, verbose_name='comentarios')),
                ('motivo_consulta', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sesiones', to='tratamientos.MotivoConsulta')),
                ('paciente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sesiones_paciente', to='pacientes.Paciente')),
            ],
            options={
                'verbose_name': 'sesión',
                'verbose_name_plural': 'sesiones',
            },
        ),
    ]
| 6,356 | 1,891 |
from typing import Any, List, Dict
# Test fixture: media stream metadata for one video + one audio stream.
# Presumably captured from ffprobe's JSON "streams" output — confirm against
# the code that consumes it.
RAW_INFO: Dict[str, List[Dict[str, Any]]] = {
    "streams": [
        {
            "index": 0,
            "codec_name": "h264",
            "codec_long_name": "H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10",
            "profile": "High",
            "codec_type": "video",
            "codec_time_base": "1001/48000",
            "codec_tag_string": "avc1",
            "codec_tag": "0x31637661",
            "width": 1920,
            "height": 800,
            "coded_width": 1920,
            "coded_height": 800,
            "has_b_frames": 2,
            "sample_aspect_ratio": "1:1",
            "display_aspect_ratio": "12:5",
            "pix_fmt": "yuv420p",
            "level": 41,
            "chroma_location": "left",
            "refs": 1,
            "is_avc": "true",
            "nal_length_size": "4",
            "r_frame_rate": "24000/1001",
            "avg_frame_rate": "24000/1001",
            "time_base": "1/24000",
            "start_pts": 0,
            "start_time": "0.000000",
            "duration_ts": 168240072,
            "duration": "7010.003000",
            "bit_rate": "2150207",
            "bits_per_raw_sample": "8",
            "nb_frames": "168072",
            "disposition": {
                "default": 1,
                "dub": 0,
                "original": 0,
                "comment": 0,
                "lyrics": 0,
                "karaoke": 0,
                "forced": 0,
                "hearing_impaired": 0,
                "visual_impaired": 0,
                "clean_effects": 0,
                "attached_pic": 0,
                "timed_thumbnails": 0,
            },
            "tags": {"language": "und", "handler_name": "VideoHandler"},
        },
        {
            "index": 1,
            "codec_name": "aac",
            "codec_long_name": "AAC (Advanced Audio Coding)",
            "profile": "LC",
            "codec_type": "audio",
            "codec_time_base": "1/48000",
            "codec_tag_string": "mp4a",
            "codec_tag": "0x6134706d",
            "sample_fmt": "fltp",
            "sample_rate": "48000",
            "channels": 2,
            "channel_layout": "stereo",
            "bits_per_sample": 0,
            "r_frame_rate": "0/0",
            "avg_frame_rate": "0/0",
            "time_base": "1/48000",
            "start_pts": 0,
            "start_time": "0.000000",
            "duration_ts": 336480768,
            "duration": "7010.016000",
            "bit_rate": "143882",
            "max_bit_rate": "143882",
            "nb_frames": "328597",
            "disposition": {
                "default": 1,
                "dub": 0,
                "original": 0,
                "comment": 0,
                "lyrics": 0,
                "karaoke": 0,
                "forced": 0,
                "hearing_impaired": 0,
                "visual_impaired": 0,
                "clean_effects": 0,
                "attached_pic": 0,
                "timed_thumbnails": 0,
            },
            "tags": {"language": "und", "handler_name": "SoundHandler"},
        },
    ]
}
# Test fixture: two torrent entries — one completed/paused, one stalled
# download. The field set matches a torrent-client list API (looks like
# qBittorrent's /torrents/info — verify against the consumer).
TORRENTS: List[Dict[str, Any]] = [
    {
        "added_on": 1612534456,
        "amount_left": 0,
        "auto_tmm": False,
        "availability": -1,
        "category": "1",
        "completed": 1227921990,
        "completion_on": 1612542927,
        "content_path": "/home/user/Downloads/2021-01-11-raspios-buster-armhf.zip",
        "dl_limit": -1,
        "dlspeed": 0,
        "downloaded": 1243692499,
        "downloaded_session": 0,
        "eta": 8640000,
        "f_l_piece_prio": False,
        "force_start": False,
        "hash": "9005f3068fff382eca98cdd6380f08599319520f",
        "last_activity": 0,
        "magnet_uri": "magnet:?xt=urn:btih:9005f3068fff382eca98cdd6380f08599319520f&dn=2021-01-11-raspios-buster-armhf.zip&tr=http%3a%2f%2ftracker.raspberrypi.org%3a6969%2fannounce",
        "max_ratio": -1,
        "max_seeding_time": -1,
        "name": "2021-01-11-raspios-buster-armhf.zip",
        "num_complete": 0,
        "num_incomplete": 615,
        "num_leechs": 0,
        "num_seeds": 0,
        "priority": 0,
        "progress": 1,
        "ratio": 6.351007187348165e-05,
        "ratio_limit": -2,
        "save_path": "/home/user/Downloads/",
        "seeding_time_limit": -2,
        "seen_complete": -3600,
        "seq_dl": False,
        "size": 1227921990,
        "state": "pausedUP",
        "super_seeding": False,
        "tags": "",
        "time_active": 14334,
        "total_size": 1227921990,
        "tracker": "",
        "trackers_count": 1,
        "up_limit": -1,
        "uploaded": 78987,
        "uploaded_session": 0,
        "upspeed": 0,
    },
    {
        "added_on": 1612746101,
        "amount_left": 1741422592,
        "auto_tmm": False,
        "availability": 0,
        "category": "2",
        "completed": 0,
        "completion_on": -3600,
        "content_path": "/home/user/Downloads/xubuntu-20.04.2-desktop-amd64.iso",
        "dl_limit": -1,
        "dlspeed": 0,
        "downloaded": 0,
        "downloaded_session": 0,
        "eta": 8640000,
        "f_l_piece_prio": False,
        "force_start": False,
        "hash": "5d6bf814125b1660f29a6841dbb5f6e277eb02cc",
        "last_activity": 1612746105,
        "magnet_uri": "magnet:?xt=urn:btih:5d6bf814125b1660f29a6841dbb5f6e277eb02cc&dn=xubuntu-20.04.2-desktop-amd64.iso&tr=https%3a%2f%2ftorrent.ubuntu.com%2fannounce",
        "max_ratio": -1,
        "max_seeding_time": -1,
        "name": "xubuntu-20.04.2-desktop-amd64.iso",
        "num_complete": 0,
        "num_incomplete": 0,
        "num_leechs": 0,
        "num_seeds": 0,
        "priority": 4,
        "progress": 0,
        "ratio": 0,
        "ratio_limit": -2,
        "save_path": "/home/user/Downloads/",
        "seeding_time_limit": -2,
        "seen_complete": -3600,
        "seq_dl": False,
        "size": 1741422592,
        "state": "stalledDL",
        "super_seeding": False,
        "tags": "",
        "time_active": 0,
        "total_size": 1741422592,
        "tracker": "",
        "trackers_count": 1,
        "up_limit": -1,
        "uploaded": 0,
        "uploaded_session": 0,
        "upspeed": 0,
    },
]
# Test fixture: a movie-database lookup response with one movie hit and the
# other result categories empty (shape matches TMDb's /find endpoint —
# confirm against the consumer).
MOVIEDB: Dict[str, Any] = {
    "movie_results": [
        {
            "genre_ids": [18],
            "original_language": "en",
            "original_title": "12 Angry Men",
            "poster_path": "/wh0f80G6GZvYBNiYmvqFngt3IYq.jpg",
            "video": False,
            "vote_average": 8.5,
            "overview": "The defense and the prosecution have rested and the jury is filing into the jury room to decide if a young Spanish-American is guilty or innocent of murdering his father. What begins as an open and shut case soon becomes a mini-drama of each of the jurors' prejudices and preconceptions about the trial, the accused, and each other.",
            "release_date": "1957-04-10",
            "vote_count": 5322,
            "title": "12 Angry Men",
            "adult": False,
            "backdrop_path": "/qqHQsStV6exghCM7zbObuYBiYxw.jpg",
            "id": 389,
            "popularity": 20.461,
        }
    ],
    "person_results": [],
    "tv_results": [],
    "tv_episode_results": [],
    "tv_season_results": [],
}
| 7,308 | 2,955 |
from simulator import PolychromaticField, cf, mm
# Build a polychromatic field (scaled D65 illuminant) sampled on a
# 12 mm x 12 mm, 1200x1200 grid.
F = PolychromaticField(
    spectrum=1.5 * cf.illuminant_d65,
    extent_x=12.0 * mm,
    extent_y=12.0 * mm,
    Nx=1200,
    Ny=1200,
)
# Load the aperture mask from an image, padded by 9 mm per side and
# resampled to 1500x1500.
F.add_aperture_from_image(
    "./apertures/circular_rings.jpg", pad=(9 * mm, 9 * mm), Nx=1500, Ny=1500
)
# Propagate to z = 1.5 (presumably meters — confirm the simulator's units)
# and render the resulting diffraction pattern.
rgb = F.compute_colors_at(z=1.5)
F.plot(rgb, xlim=[-8, 8], ylim=[-8, 8])
| 368 | 191 |
from items import vehicles, _xml
from gui.Scaleform.daapi.view.lobby.trainings.training_room import TrainingRoom;
from helpers.statistics import StatisticsCollector;
from game import init
import ScoreViewTools
def exportAll():
    """Dump every currently-enabled data category via ScoreViewTools.Export."""
    ScoreViewTools.Export.init()
    ScoreViewTools.Export.cleanup()
    ScoreViewTools.Export.gameInfo()
    ScoreViewTools.Export.vehicles()
    # Disabled exports — left here deliberately, re-enable as needed.
    #ScoreViewTools.Export.gameData()
    #ScoreViewTools.Export.equipment()
    #ScoreViewTools.Export.consumables()
    ScoreViewTools.Export.maps()
    #ScoreViewTools.Export.serverSettings()
# Monkey-patch StatisticsCollector.noteHangarLoadingState with a pass-through
# wrapper (currently adds no behavior; kept as an extension point).
old_noteHangarLoadingState = StatisticsCollector.noteHangarLoadingState

def new_noteHangarLoadingState(self, state, initialState=False, showSummaryNow=False):
    old_noteHangarLoadingState(self, state, initialState, showSummaryNow)

StatisticsCollector.noteHangarLoadingState = new_noteHangarLoadingState
# Keep references to the original TrainingRoom handlers so the replacement
# hooks below can delegate to them after exporting.
# (Removed a leftover Python-2 debug statement: `print dir(TrainingRoom)`.)
old_onSettingUpdated = TrainingRoom.onSettingUpdated
old_onRostersChanged = TrainingRoom.onRostersChanged
old_onPlayerStateChanged = TrainingRoom.onPlayerStateChanged
old__TrainingRoomBase__showSettings = TrainingRoom._TrainingRoomBase__showSettings
old_showRosters = TrainingRoom._showRosters
# One-shot flag: the first roster display triggers a full export (see
# new_showRosters).
first = True
def new_onSettingUpdated(self, functional, settingName, settingValue):
    """Hook: export the room settings, then delegate to the original handler."""
    ScoreViewTools.Export.trainingRoomSettings(functional)
    old_onSettingUpdated(self, functional, settingName, settingValue)
def new_onRostersChanged(self, functional, rosters, full):
    """Hook: export the roster, then delegate to the original handler."""
    ScoreViewTools.Export.trainingRoomRoster(functional)
    old_onRostersChanged(self, functional, rosters, full)
def new_onPlayerStateChanged(self, functional, roster, accountInfo):
    """Hook: export the roster on player-state change, then delegate."""
    ScoreViewTools.Export.trainingRoomRoster(functional)
    old_onPlayerStateChanged(self, functional, roster, accountInfo)
def new__TrainingRoomBase__showSettings(self, functional):
    """Hook for the name-mangled private method: export settings, then delegate."""
    ScoreViewTools.Export.trainingRoomSettings(functional)
    old__TrainingRoomBase__showSettings(self, functional)
def new_showRosters(self, functional, rosters):
    """Hook: run the one-time full export on first display, export the
    roster, then delegate to the original handler."""
    global first
    if first:
        first = False
        exportAll()
    ScoreViewTools.Export.trainingRoomRoster(functional)
    old_showRosters(self, functional, rosters)
# Install the exporting hooks in place of the original TrainingRoom handlers.
TrainingRoom.onSettingUpdated = new_onSettingUpdated
TrainingRoom.onRostersChanged = new_onRostersChanged
TrainingRoom.onPlayerStateChanged = new_onPlayerStateChanged
TrainingRoom._TrainingRoomBase__showSettings = new__TrainingRoomBase__showSettings
TrainingRoom._showRosters = new_showRosters
| 2,581 | 754 |
import random
def mergeSort(numbers):
    """Sort ``numbers`` in place with a recursive merge sort and return it."""
    if len(numbers) <= 1:
        return numbers
    mid = len(numbers) // 2
    left = mergeSort(numbers[:mid])
    right = mergeSort(numbers[mid:])
    return merge(left, right, numbers)

def merge(left, right, numbers):
    """Merge the sorted runs ``left`` and ``right`` into ``numbers``.

    Stable: on ties the element from ``left`` is taken first.
    """
    i = j = 0
    for k in range(len(left) + len(right)):
        take_left = j >= len(right) or (i < len(left) and left[i] <= right[j])
        if take_left:
            numbers[k] = left[i]
            i += 1
        else:
            numbers[k] = right[j]
            j += 1
    return numbers
# Demo: sort 100 random integers in [1, 100] and print the result.
numbers = []
for i in range(0, 100):
    numbers.append(random.randint(1, 100))
numbers = mergeSort(numbers)
print(numbers)
| 897 | 315 |
from utilsw2 import *
from Reader import *
from Adapted_voc_evaluation import *
import glob
# AICity challenge dataset locations (relative to the repo root).
path_to_video = 'datasets/AICity_data/train/S03/c010/vdo.avi'
path_to_frames = 'datasets/frames/'
results_path = 'Results/Task1_1'
def task4(color_space=cv2.COLOR_BGR2GRAY, mu_file = f"W2/task1_1/mu.pkl",sigma_file= f"W2/task1_1/sigma.pkl"):
    """Fit a per-pixel Gaussian background model in the given color space,
    detect foreground boxes on the last 75% of frames, and print the mAP
    against the ground-truth annotations.

    :param color_space: cv2 color-conversion code used before modeling
    :param mu_file: pickle path for the cached per-pixel means
    :param sigma_file: pickle path for the cached per-pixel std deviations
    """
    video_n_frames = len(glob.glob1(path_to_frames, "*.jpg"))
    mu, sigma = GetGaussianModel(path_to_frames, video_n_frames,color_space,mu_file,sigma_file)
    # The first 25% of frames trains the model; the rest is evaluated.
    lowLimit = int(video_n_frames * 0.25)
    highLimit = int(video_n_frames)
    det_bb = remove_background(mu,
                               sigma,
                               6,  # NOTE(review): presumably the sigma threshold (alpha) — confirm
                               path_to_frames,
                               lowLimit,
                               highLimit,
                               animation=True,
                               color_space=color_space)
    reader = AICityChallengeAnnotationReader(path='datasets/AICity_data/train/S03/c010/gt/gt.txt',initFrame=int(video_n_frames * 0.25), finalFrame=int(video_n_frames))
    gt = reader.get_annotations(classes=['car'], only_not_parked=True)
    # Build one (possibly empty) annotation list per evaluated frame.
    bb_gt = []
    # for frame in gt.keys():
    for frame in range(int(video_n_frames * 0.25), int(video_n_frames)):
        annotations = gt.get(frame, [])
        bb_gt.append(annotations)
    ap, prec, rec = mean_average_precision(bb_gt , det_bb)
    print (ap)
if __name__ == '__main__':
    # Evaluate the background model in several alternative color spaces,
    # caching each model under a color-space-specific pickle name.
    colors = [cv2.COLOR_BGR2HSV, cv2.COLOR_BGR2RGB, cv2.COLOR_BGR2YCrCb, cv2.COLOR_BGR2LAB]
    for c in colors:
        task4(c,f"W2/task4_1/mu{str(c)}.pkl",f"W2/task4_1/sigma{str(c)}.pkl")
| 1,586 | 626 |
# Copyright 2021 Sai Sampath Kumar Balivada
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
file handler reads and writes datastore entries to and from the disk.
file paths are case sensitive.
"""
import os.path
import datetime
from pathlib import Path
from dictstore.exceptions import InvalidFileExtension
def generate_file_header_string() -> str:
    """Build the two-line comment header written at the top of a data file."""
    now = str(datetime.datetime.now())
    return '// Python Dictstore File\n' + '// Last Rewrite: ' + now + '\n'
class FileHandler:
    """
    Handles the dictstore datastore file(s).
    """

    def __has_valid_file_extension(self):
        """Return True when the configured path ends in ``.dictstore``."""
        return self.file_path.endswith('.dictstore')

    def __init__(self, file_path) -> None:
        """
        Create a file handler for the datastore file.

        Exceptions:
            OSError
            InvalidFileExtension
        """
        self.file_path = file_path

        # Reject anything that is not a .dictstore file up front.
        if not self.__has_valid_file_extension():
            raise InvalidFileExtension()

        # First use: create parent directories and a fresh file containing
        # only the header.
        if not os.path.exists(self.file_path):
            Path(os.path.dirname(self.file_path)).mkdir(
                parents=True,
                exist_ok=True
            )
            with open(self.file_path, 'w', encoding='utf-8') as data_file:
                data_file.write(generate_file_header_string())

        # Cache the file's full contents (header included).
        with open(self.file_path, 'r', encoding='utf-8') as data_file:
            self.file_contents = data_file.read()

    def rewrite_to_file(self, lines) -> None:
        """Replace the data file's contents with a fresh header plus ``lines``."""
        with open(self.file_path, 'w', encoding='utf-8') as data_file:
            data_file.write(generate_file_header_string())
            data_file.writelines(lines)

    def append_to_file(self, string: str) -> None:
        """Append ``string`` to the data file."""
        with open(self.file_path, 'a', encoding='utf-8') as data_file:
            data_file.write(string)

    def read_from_file(self) -> str:
        """Return the data file's lines, skipping the two-line header."""
        with open(self.file_path, 'r', encoding='utf-8') as data_file:
            data_file.readline()
            data_file.readline()
            return data_file.readlines()
| 3,207 | 909 |
# -*- coding: utf-8 -*-
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests action mapper"""
import unittest
from o2a.converter.relation import Relation
from o2a.converter.task import Task
from o2a.mappers.action_mapper import ActionMapper
# Fixture identifiers shared by the tests below.
TEST_MAPPER_NAME = "mapper_name"
TEST_DAG_NAME = "dag_name"
class TestActionMapper(unittest.TestCase):
    """Tests for ActionMapper.prepend_task."""

    def test_prepend_task_no_tasks(self):
        # Prepending to an empty task list fails: there is nothing to link to.
        task_1 = Task(task_id=TEST_MAPPER_NAME + "_1", template_name="pig.tpl")
        with self.assertRaises(IndexError):
            ActionMapper.prepend_task(task_to_prepend=task_1, tasks=[], relations=[])

    def test_prepend_task_empty_relations(self):
        # The prepended task is linked to the (previously) first task.
        task_1 = Task(task_id=TEST_MAPPER_NAME + "_1", template_name="pig.tpl")
        task_2 = Task(task_id=TEST_MAPPER_NAME + "_2", template_name="pig.tpl")
        tasks, relations = ActionMapper.prepend_task(task_to_prepend=task_1, tasks=[task_2], relations=[])
        self.assertEqual([task_1, task_2], tasks)
        self.assertEqual([Relation(from_task_id="mapper_name_1", to_task_id="mapper_name_2")], relations)

    def test_prepend_task_some_relations(self):
        # Existing relations are preserved; the new edge is inserted first.
        task_1 = Task(task_id=TEST_MAPPER_NAME + "_1", template_name="pig.tpl")
        task_2 = Task(task_id=TEST_MAPPER_NAME + "_2", template_name="pig.tpl")
        task_3 = Task(task_id=TEST_MAPPER_NAME + "_3", template_name="pig.tpl")
        tasks, relations = ActionMapper.prepend_task(
            task_to_prepend=task_1,
            tasks=[task_2, task_3],
            relations=[Relation(from_task_id="mapper_name_2", to_task_id="mapper_name_3")],
        )
        self.assertEqual([task_1, task_2, task_3], tasks)
        self.assertEqual(
            [
                Relation(from_task_id="mapper_name_1", to_task_id="mapper_name_2"),
                Relation(from_task_id="mapper_name_2", to_task_id="mapper_name_3"),
            ],
            relations,
        )
| 2,432 | 832 |
def euclidean_gcd(first, second):
    """
    Calculates the GCD of two numbers with the iterative, division-based
    Euclidean algorithm.
    :param first: First number
    :param second: Second number
    """
    a, b = first, second
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(first, second):
    """
    Calculates the GCD of two numbers with the recursive Euclidean algorithm.
    :param first: First number
    :param second: Second number
    """
    return first if not second else euclidean_gcd_recursive(second, first % second)
def main():
    """Read two whitespace-separated integers from stdin and print their GCD
    computed by both implementations."""
    first, second = map(int, input('Enter 2 integers: ').split())
    print('Division-based: GCD of {} and {} is: {}'.format(first,
                                                           second,
                                                           euclidean_gcd(
                                                               first, second)))
    print('Recursive: GCD of {} and {} is: {}'.format(first,
                                                      second,
                                                      euclidean_gcd_recursive(
                                                          first, second)))
# Script entry point.
if __name__ == '__main__':
    main()
| 1,265 | 304 |
""" Distances metrics based on the covariance matrix (mostly in the context of merging and compress)"""
import torch
import numpy as np
import torch.nn.functional as F
# Fix the NumPy RNG so any stochastic use of this module is reproducible.
np.random.seed(0)
def cov(m, y=None):
    """Return the sample covariance matrix of the rows of ``m``.

    When ``y`` is given, its rows are stacked below ``m`` before computing.
    """
    if y is not None:
        m = torch.cat((m, y), dim=0)
    # Center each row around its own mean, then form x x^T / (n - 1).
    centered = m - torch.mean(m, dim=1)[:, None]
    return 1 / (centered.size(1) - 1) * centered.mm(centered.t())
def cov_norm(m, y):
    """Difference of the Frobenius norms of the covariance matrices of ``m``
    and ``y`` (each input is standardized per column first)."""
    m_std = (m - m.mean(dim=0)) / m.std(dim=0)
    y_std = (y - y.mean(dim=0)) / y.std(dim=0)
    return torch.norm(cov(m_std)) - torch.norm(cov(y_std))
def get_svd(m, y):
    """Return the singular values of the column-standardized ``m`` and ``y``.

    :returns: tuple ``(s1, s2)`` of 1-D tensors of singular values, in
        descending order.
    """
    m = (m - m.mean(dim=0)) / m.std(dim=0)
    y = (y - y.mean(dim=0)) / y.std(dim=0)
    # torch.svd is deprecated; torch.linalg.svdvals returns the same
    # singular values without materializing U and V.
    s1 = torch.linalg.svdvals(m)
    s2 = torch.linalg.svdvals(y)
    return s1, s2
def cov_eig(m, y, k=None):
    """Absolute value of the summed gap between the (optionally top-k)
    singular values of the standardized inputs."""
    s1, s2 = get_svd(m, y)
    gap = s1 - s2 if k is None else s1[:k] - s2[:k]
    return gap.sum().abs()
def cov_eig_kl(m, y, k=None):
    """KL divergence between softmax distributions over the (top-k) singular
    values of the standardized inputs.

    :param m: first 2-D tensor.
    :param y: second 2-D tensor.
    :param k: optionally restrict the comparison to the k largest singular values.
    :return: scalar tensor holding the KL divergence.
    """
    s1, s2 = get_svd(m, y)
    if k is not None:
        # BUG FIX: the original `s1, s2 = s1[:k] - s2[:k]` unpacked the
        # element-wise *difference* instead of truncating each spectrum.
        s1, s2 = s1[:k], s2[:k]
    # BUG FIX: F.kl_div requires two arguments -- log-probabilities as the
    # input and probabilities as the target; the original passed a single
    # difference of softmaxes.  reduction='sum' yields the true KL value.
    return F.kl_div(F.log_softmax(s1, dim=0), F.softmax(s2, dim=0),
                    reduction='sum')
def cov_kl(m, y, k=None):
    """KL divergence between softmax distributions over the flattened inputs.

    :param m: first tensor (any shape).
    :param y: second tensor (any shape).
    :param k: unused; kept for signature compatibility with the other metrics.
    :return: non-negative scalar tensor, 0 when the two distributions match.
    """
    # BUG FIX: F.kl_div expects *log*-probabilities for its input and plain
    # probabilities for its target; the original passed raw softmax output
    # for both (and omitted `dim`, which is deprecated).  reduction='sum'
    # gives the true (non-negative) KL divergence.
    m_logp = F.log_softmax(m.flatten(), dim=0)
    y_p = F.softmax(y.flatten(), dim=0)
    return F.kl_div(m_logp, y_p, reduction='sum')
if __name__ == "__main__":
x = torch.randn((100, 20))
y = torch.randn((100, 50))
print(cov_norm(x, y))
| 1,627 | 750 |
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
class TFHubContext:
    """
    Holds a finalized TF1 graph that embeds 1-D string tensors with a TF-Hub
    module (Universal Sentence Encoder by default).

    The graph is built once in ``__init__``; each :meth:`get_embedding` call
    opens a fresh ``tf.Session`` against it, so every call re-runs variable
    and table initialization (slow, but keeps the graph immutable).
    """
    def __init__(self, url="https://tfhub.dev/google/universal-sentence-encoder-large/3") -> None:
        super().__init__()
        print('Initialize graph:')
        # Create graph and finalize (finalizing optional but recommended).
        self.g = tf.Graph()
        with self.g.as_default():
            # We will be feeding 1D tensors of text into the graph.
            self.text_input = tf.placeholder(dtype=tf.string, shape=[None])
            self.embed = hub.Module(url)
            # Overridable hook: subclasses change how the hub module is
            # invoked (e.g. ELMo's signature/as_dict call).
            self.embedded_text = self.get_embedded_text()
            self.init_op = tf.group([tf.global_variables_initializer(), tf.tables_initializer()])
            self.g.finalize()
    def get_embedded_text(self):
        # Default invocation: module applied directly to the string placeholder.
        return self.embed(self.text_input)
    def get_embedding(self, texts):
        """Run the graph on *texts* (list of str) and return the embeddings array."""
        with tf.Session(graph=self.g) as session:
            session.run(self.init_op)
            texts_embeddings = session.run(self.embedded_text, feed_dict={self.text_input: texts})
            return texts_embeddings
    def close(self):
        # No real resources are held open here (sessions are per-call);
        # this only logs for symmetry with context-manager style callers.
        print('TFHubContext closed')
class ElmoTFHubContext(TFHubContext):
    """
    TFHubContext variant for the ELMo module, whose hub signature returns a
    dict of tensors; ``type`` selects which entry :meth:`get_embedding`
    yields (e.g. 'elmo' or 'default').
    """
    def __init__(self, url="https://tfhub.dev/google/elmo/2", type='elmo') -> None:
        # NOTE(review): ``type`` shadows the builtin but is kept for caller
        # compatibility.  super().__init__ builds the graph before self.type
        # is set; that is safe because graph construction never reads it.
        super().__init__(url)
        self.type = type
    def get_embedded_text(self):
        # ELMo must be called with an explicit signature and as_dict=True to
        # expose its multiple output tensors.
        return self.embed(self.text_input, signature='default', as_dict=True)
    def get_embedding(self, texts):
        """Embed *texts* and return the output tensor selected by ``self.type``."""
        with tf.Session(graph=self.g) as session:
            session.run(self.init_op)
            texts_embeddings = session.run(self.embedded_text, feed_dict={self.text_input: texts})[self.type]
            return texts_embeddings
def get_use_embedding(texts):
    """One-shot embedding of *texts* with the Universal Sentence Encoder.

    Builds the hub module in the default graph, runs it in a throwaway
    session, prints a short preview per message, and returns the embeddings.
    """
    use_embed = hub.Module("https://tfhub.dev/google/universal-sentence-encoder-large/3")
    with tf.Session() as session:
        session.run([tf.global_variables_initializer(), tf.tables_initializer()])
        texts_embeddings = session.run(use_embed(texts))
        for text, vector in zip(texts, np.array(texts_embeddings).tolist()):
            print("Message: {}".format(text))
            print("Embedding size: {}".format(len(vector)))
            preview = ", ".join(str(value) for value in vector[:3])
            print("Embedding: [{}, ...]\n".format(preview))
    return texts_embeddings
if __name__ == '__main__':
    # Smoke test: embed two sentences with ELMo's mean-pooled ('default')
    # output and print the resulting array shape.
    emb = ElmoTFHubContext(type='default')
    tt = emb.get_embedding(['This is a sentence.', 'This is another sentence.'])
    print(tt.shape)
from __future__ import unicode_literals
from netmiko.endace.endace_ssh import EndaceSSH
__all__ = ['EndaceSSH']
| 117 | 46 |
# -*- coding: utf-8 -*-
# pylint: disable=wildcard-import,redefined-builtin,unused-wildcard-import
from __future__ import absolute_import, division, print_function
from builtins import *
# pylint: enable=wildcard-import,redefined-builtin,unused-wildcard-import
from genemap.mappers import get_mappers
def main(args):
    """Map the identifiers given on the command line and print the result."""
    mapper_instance = args.mapper.from_args(args)
    mapped_ids = mapper_instance.map_ids(args.ids)
    print(' '.join(mapped_ids))
def configure_subparser(subparser):
    """Configures subparser for subcommand."""
    parser = subparser.add_parser('map_ids')
    parser.set_defaults(main=main)
    mapper_subparser = parser.add_subparsers(dest='mapper')
    mapper_subparser.required = True
    # Register one sub-command per available mapper class.
    for mapper_name, mapper_cls in get_mappers(with_command_line=True).items():
        sub = mapper_subparser.add_parser(mapper_name)
        mapper_cls.configure_parser(sub)
        sub.add_argument('ids', nargs='+')
        sub.set_defaults(mapper=mapper_cls)
| 1,020 | 351 |
import gzip
import random
import subprocess
import sys
def get_acceptors(filename):
    """Read acceptor-site windows from a gzipped intron table.

    Each input line holds: exon1 intron exon2 expression gene.
    :param filename: path to the gzipped table.
    :return: shuffled list of (20 nt intron tail, acceptor dinucleotide,
        first 20 nt of downstream exon, expression) tuples.
    """
    accs = []
    with gzip.open(filename, 'rt') as fp:
        # Iterate the file lazily instead of materializing readlines().
        for line in fp:
            (exon1, intron, exon2, expression, gene) = line.split()
            s1 = intron[-22:-2]  # 20 nt of intron upstream of the acceptor
            s2 = intron[-2:]     # acceptor dinucleotide (canonically AG)
            s3 = exon2[0:20]     # first 20 nt of the downstream exon
            accs.append((s1, s2, s3, expression))
    random.shuffle(accs)
    return accs
def get_donors(filename):
    """Read donor-site windows from a gzipped intron table.

    Each input line holds: exon1 intron exon2 expression gene.
    :param filename: path to the gzipped table.
    :return: list (input order preserved) of (last 20 nt of upstream exon,
        donor dinucleotide, 20 nt of intron head, expression) tuples.
    """
    dons = []
    with gzip.open(filename, 'rt') as fp:
        # Iterate the file lazily instead of materializing readlines().
        for line in fp:
            (exon1, intron, exon2, expression, gene) = line.split()
            s1 = exon1[-20:]     # last 20 nt of the upstream exon
            s2 = intron[0:2]     # donor dinucleotide (canonically GT)
            s3 = intron[2:22]    # next 20 nt of the intron
            dons.append((s1, s2, s3, expression))
    return dons
def write_fasta(filename, name, seqs):
    """Write (s1, s2, s3, expression) records to *filename* as FASTA.

    Headers look like '>name-1 expression'; the three sequence parts are
    concatenated on the following line.
    """
    with open(filename, 'w') as fp:
        for idx, (s1, s2, s3, x) in enumerate(seqs, start=1):
            fp.write(f'>{name}-{idx} {x}\n')
            fp.write(f'{s1}{s2}{s3}\n')
def randomseq(size, contents='ACGT'):
    """Return a random sequence of *size* characters drawn from *contents*.

    :param size: length of the sequence to generate.
    :param contents: pool of characters to sample from; repeated letters
        weight the draw toward those letters.
    """
    # ''.join builds the string in one pass instead of the original
    # quadratic '+=' concatenation loop.
    return ''.join(random.choice(contents) for _ in range(size))
def make_negative1(seqs):
    """Build fully random negative examples, one per input sequence.

    Only the central splice dinucleotide is kept; the flanks are uniform
    random 20-mers and the expression label is 0.
    """
    neg = []
    for _ in range(len(seqs)):
        left = randomseq(20)
        splice = seqs[0][1]  # either GT or AG
        right = randomseq(20)
        neg.append((left, splice, right, 0))
    return neg
def make_negative2(seqs):
    """Build negatives matching overall base composition (not position).

    Flanks are sampled from the pooled characters of all upstream and all
    downstream parts respectively, so single-base frequencies are preserved.
    """
    upstream_pool = ''.join(record[0] for record in seqs)
    downstream_pool = ''.join(record[2] for record in seqs)
    neg = []
    for _ in range(len(seqs)):
        left = randomseq(20, upstream_pool)
        splice = seqs[0][1]  # either GT or AG
        right = randomseq(20, downstream_pool)
        neg.append((left, splice, right, 0))
    return neg
def make_negative3(seqs):
    """Build negatives preserving per-position base composition.

    Each output column is sampled from the observed characters at that same
    column across all inputs, so positional frequencies are preserved.
    """
    upstream_cols = [[] for _ in range(20)]
    downstream_cols = [[] for _ in range(20)]
    for s1, s2, s3, x in seqs:
        for pos in range(20):
            upstream_cols[pos].append(s1[pos])
            downstream_cols[pos].append(s3[pos])
    neg = []
    for _ in range(len(seqs)):
        left = ''
        right = ''
        # Draw left/right interleaved, matching the original RNG call order.
        for pos in range(20):
            left += random.choice(upstream_cols[pos])
            right += random.choice(downstream_cols[pos])
        splice = seqs[0][1]  # either GT or AG
        neg.append((left, splice, right, 0))
    return neg
def make_negative4(seqs):
    """Build negatives from the antisense strand (UNFINISHED).

    NOTE(review): this body ignores its ``seqs`` parameter, reads a global
    ``filename`` that is never defined in this module, and never returns a
    value even though the caller unpacks two results from it -- per the
    inline comment below, the implementation was completed elsewhere and
    not checked in.
    """
    # Complement table covering IUPAC ambiguity codes.
    comp = str.maketrans('ACGTRYMKWSBDHV', 'TGCAYRKMWSVHDB')
    neg = []
    with gzip.open(filename, 'rt') as fp:
        for line in fp.readlines():
            (exon1, intron, exon2, expression, gene) = line.split()
            seq = exon1 + intron + exon2
            # Reverse-complement to scan the opposite strand.
            anti = seq.translate(comp)[::-1]
            for i in range(20, len(seq) -20):
                if anti[i:i+2] == 'GT':
                    pass # this is actually completed elsewhere and not checked in...
#############
# 42 nt set # 20 nt upstream and downstream of canonical GT|AG
#############
# For each genome, write the observed donor/acceptor windows plus three
# styles of negative examples as FASTA files under splice42/ (and data42/).
genomes = ('at', 'ce', 'dm')
for gen in genomes:
    # observed
    eie = f'eie.{gen}.txt.gz'
    dons = get_donors(eie)
    accs = get_acceptors(eie)
    write_fasta(f'splice42/{gen}.don.fa', 'don', dons)
    write_fasta(f'splice42/{gen}.acc.fa', 'acc', accs)
    # negative 1 - totally random
    nd = make_negative1(dons)
    na = make_negative1(accs)
    write_fasta(f'splice42/{gen}.n1don.fa', 'n1don', nd)
    write_fasta(f'splice42/{gen}.n1acc.fa', 'n1acc', na)
    # negative 2 - compositional but not positional
    nd = make_negative2(dons)
    na = make_negative2(accs)
    write_fasta(f'splice42/{gen}.n2don.fa', 'n2don', nd)
    write_fasta(f'splice42/{gen}.n2acc.fa', 'n2acc', na)
    # negative 3 - compositional and positional
    nd = make_negative3(dons)
    na = make_negative3(accs)
    write_fasta(f'splice42/{gen}.n3don.fa', 'n3don', nd)
    write_fasta(f'splice42/{gen}.n3acc.fa', 'n3acc', na)
    write_fasta(f'data42/{gen}.n3don.fa', 'n3don', nd)
    write_fasta(f'data42/{gen}.n3acc.fa', 'n3acc', na)
    # negative 4 - sequences from the opposite strand
    # NOTE(review): make_negative4 currently returns None, so this unpacking
    # raises TypeError at runtime; its results are also never written out.
    nd, na = make_negative4(eie)
| 3,463 | 1,660 |
from sklearn.cluster import KMeans
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def get_columns(db, col1, col2):
    """Return the values of two dataframe columns as an (n, 2) numpy array.

    :param db: pandas DataFrame holding the dataset.
    :param col1: name of the first column (x axis).
    :param col2: name of the second column (y axis).
    :return: numpy array of shape (len(db), 2).
    """
    # BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy()
    # is the supported replacement.  np.array() keeps the defensive copy.
    inputs = db[[col1, col2]]
    return np.array(inputs.to_numpy())
def plot_colored_graph(inputs, kmeans_result):
    """Scatter-plot the samples colored by crime value and overlay centers.

    :param inputs: (n, 2) array of [crime, clearance_days] rows.
    :param kmeans_result: iterable of 2-D cluster-center coordinates.
    """
    x = inputs.transpose()
    df = pd.DataFrame(dict(
        crime=x[0],
        dias_para_completar=x[1],
        color=x[0]
    ))
    # BUG FIX: seaborn >= 0.12 no longer accepts positional column names for
    # lmplot; pass them as x/y keywords (equivalent on older versions too).
    sns.lmplot(x='crime', y='dias_para_completar', data=df, hue='color', fit_reg=False)
    plt.title('Tempo para finalizar um crime')
    # Overlay the cluster centers as red squares ('rs').
    clusterX = [row[0] for row in kmeans_result]
    clusterY = [row[1] for row in kmeans_result]
    plt.plot(clusterX, clusterY, 'rs')
    plt.show()
def find_elbow(inputs, max_k):
    """Plot the k-means inertia for each candidate k (the 'elbow' heuristic).

    Note: despite the name, ``max_k`` is an iterable of candidate k values.
    """
    inertias = []
    for candidate_k in max_k:
        model = KMeans(n_clusters=candidate_k)
        model.fit(inputs)
        inertias.append(model.inertia_)
    plt.plot(max_k, inertias)
    plt.title('Elbow curve')
def main():
    """Load the crimes dataset, inspect the k-means elbow curve, then
    cluster with k=8 and plot the colored result."""
    # Load dataset
    crimes_db = pd.read_csv('base/result_min.csv')
    inputs = get_columns(crimes_db, 'description', 'clearance_days')
    # find best k
    find_elbow(inputs, range(2, 20))
    # run k-means
    kmeans = KMeans(n_clusters=8, random_state=0).fit(inputs)
    print(kmeans.cluster_centers_)
    plot_colored_graph(inputs, kmeans.cluster_centers_)
main()  # NOTE(review): runs on import; consider an `if __name__ == '__main__':` guard
| 1,300 | 528 |
# Generated by Django 2.1.4 on 2018-12-12 16:51
from django.db import migrations, models
class Migration(migrations.Migration):
    # NOTE: auto-generated initial migration creating the `Test` table;
    # keep operations untouched -- editing them would desync databases that
    # have already applied this migration.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Test',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('age', models.IntegerField(blank=True, null=True)),
                ('date', models.DateField(blank=True, null=True)),
                ('name', models.CharField(blank=True, max_length=100)),
                ('title', models.CharField(blank=True, max_length=100)),
                ('permissions', models.CharField(blank=True, max_length=20)),
            ],
        ),
    ]
| 783 | 235 |
import os
import pandas
import time
from datetime import datetime, timedelta
from collections import defaultdict
from copy import deepcopy
from googleapiclient.discovery import build
"""
All functions that are used for querying, processing, and saving
the data are located here.
"""
VALID_PERIOD_LENGTHS = ["day", "week", "month"]
class SearchSampler(object):
    """
    SearchSampler contains all functions required to sample the Google
    Health API.

    :param api_key: The API key you received from Google
    :param search_name: A suffix for your output file. It will be placed in
        the ``{output_path}/{region}`` folder with the filename
        ``{region}-{search_name}.csv``.
    :param search_params: A dictionary of parameters. Must contain the keys
        ``search_term``, ``region``, ``period_start``, ``period_end`` and
        ``period_length``.  Example::

            {
                "region": "US-DC",
                "search_term": "test",
                "period_start": "2017-01-01",
                "period_end": "2017-01-31",
                "period_length": "week"
            }

        ``search_term`` can be a single string or a list of strings and may
        include Boolean logic (see the report methodology for details).
        ``region`` can be a country, state, or DMA: states are formatted
        like ``US-CA``; DMAs are a 3-digit code (see Nielsen for info).
        ``period_start`` and ``period_end`` must be in ``YYYY-MM-DD``
        format.  ``period_length`` can be "day", "week", or "month" -- only
        "week" has been tested extensively.
    :param server: The endpoint to which requests will be made (default is
        "https://www.googleapis.com")
    :param version: The API version to use (default is `v1beta`)
    :param output_path: The path to the folder where query results will be
        saved (folder will be created if it doesn't already exist.)

    :Example:

    >>> params = {
    ...     'search_term': ['cough', 'sneeze', 'fever'],
    ...     'region': 'US-DC',
    ...     'period_start': '2017-01-01',
    ...     'period_end': '2017-02-01',
    ...     'period_length': 'week'
    ... }
    >>> search_name = "flu_symptoms"
    >>> output_path = "data"
    >>> num_samples = 5
    >>> from SearchSampler.sampler import SearchSampler
    >>> sampler = SearchSampler(api_key, search_name, params, output_path=output_path)
    >>> df_results = sampler.pull_rolling_window(num_samples=num_samples)
    >>> sampler.save_file(df_results, append=True)
    """
    def __init__(
        self,
        api_key,
        search_name,
        search_params,
        server="https://www.googleapis.com",
        version="v1beta",
        output_path="data"
    ):
        # Basic variables
        if not api_key:
            raise SystemError('ERROR: Must provide an api_key as the first parameter')
        self._search_name = search_name
        self._server = server
        self._version = version
        self.service = self._get_service(api_key)
        # Below exception is to ensure that people actually provide something for an output_path
        if output_path == "":
            raise ValueError("Please provide an output path")
        self.output_path = output_path
        ## Search parameters
        # Initialize a dictionary with default parameters
        self.params = {
            "search_term": None,
            "region": None,
            "period_start": None,
            "period_end": None,
            "period_length": "week"
        }
        # Force search_term to be a dictionary
        if type(search_params.get("search_term", None)) == str:
            search_params["search_term"] = [search_params["search_term"]]
        self.params.update(search_params)
        # NOTE(review): `not v` also rejects legitimately falsy values
        # (e.g. a numeric region of 0) -- confirm intended.
        for k, v in self.params.items():
            if not v:
                raise SystemError('ERROR: Must provide a {}'.format(k))
        # Check that start date is before end date
        if self.params['period_end'] < self.params['period_start']:
            raise ValueError('ERROR: start of period must be before end of period')
    def _get_service(self, api_key):
        """
        Sets up the connection to the Google Trends Health API
        :param api_key: API Key
        :return: Properly configured API object
        """
        url = "/".join([
            str(self._server),
            'discovery/v1/apis/trends',
            str(self._version),
            "rest"
        ])
        service = build(
            'trends',
            self._version,
            developerKey=api_key,
            discoveryServiceUrl=url
        )
        return service
    def _get_file_path(self):
        """
        :return: 2-tuple containing the file path and file name
        """
        str_path = os.path.join(str(self.output_path), str(self.params["region"]))
        str_file_name = '{region}-{identifier}.csv'.format(
            region=self.params['region'],
            identifier=self._search_name
        )
        return (str_path, str_file_name)
    def load_file(self):
        """
        Loads a csv file for later analysis, based on naming scheme used within class
        :return: Pandas dataframe
        """
        load_path, load_filename = self._get_file_path()
        full_file_path = os.path.join(str(load_path), str(load_filename))
        print('Attempting to load local file: {}'.format(full_file_path))
        return pandas.read_csv(full_file_path)
    def save_file(self, df, append=True):
        """
        Saves data in df to folder, based on the following structure\:
        `{output_path}/{region}/{region}-{search_identifier}.csv`
        :param df: Dataframe to save. Expects format\: Period, value (though names don't matter)
        :param append: Whether or not to add the new results to an existing file with the same name.\
            Setting this to `False` will overwrite any existing file.
        :return: None
        """
        # set up paths and file name
        load_path, load_filename = self._get_file_path()
        # Verify the directory exists; if not, create
        if not os.path.exists(load_path):
            os.makedirs(load_path)
        # If appending results, load previous results and join
        else:
            if append:
                try:
                    df_prev_results = self.load_file()
                except FileNotFoundError:
                    print('No previous data found. Will save to new file')
                else:
                    df = pandas.concat([df_prev_results, df])
        full_file_path = os.path.join(str(load_path), str(load_filename))
        print('Saving local file: {}'.format(full_file_path))
        df.to_csv(full_file_path, encoding='utf-8', index=False)
    def _perform_pull(self, graph_object, attempt=0, sleep_minutes=1, limit=20):
        """
        Given a connection object to the API, return a set of unformatted data. This method
        accommodates API connection problems up to the specified limit (default 20).
        :param graph_object: Properly formatted
        :param attempt: Internal, do not use. Function uses in instances in which the API fails.
        :param sleep_minutes: minutes to sleep between ordinary retries
        :param limit: maximum number of retry attempts before giving up
        :return: Unformatted data from API
        """
        # Call API
        # Enclosed in a try/except block because the API will randomly return a Rate Limit exceeded error
        # Usually as an HTTPError
        try:
            response_health = graph_object.execute()
        except Exception as msg:
            attempt += 1
            if attempt <= limit:
                # Every 5th failure backs off for 5 minutes instead of 1.
                if attempt % 5 == 0:
                    print(
                        'WARNING: Attempt #{}. This may require an extended period. Sleeping for 5 minutes. \
                        Error message:\n {}'.format(attempt, str(msg))
                    )
                    # Sleep for 5 minutes
                    time.sleep(5 * 60)
                else:
                    print(
                        'WARNING: Attempt #{}. Sleeping for just 1 minute. \
                        Error message:\n {}'.format(attempt, str(msg))
                    )
                    # Sleep for 1 minutes
                    time.sleep(sleep_minutes * 60)
                # Recursive retry; `attempt` carries the running count.
                response_health = self._perform_pull(graph_object, attempt)
            else:
                # Give up entirely
                # NOTE(review): message says "5 times" but `limit` defaults
                # to 20; the assignment below is unreachable after raise.
                raise SystemError("Attempted query 5 times and couldn't connect")
                response_health = None
        return response_health
    def pull_data_from_api(self, params=None, format='dict'):
        """
        Pulls data from the API given a set of search terms and other restrictions.
        :param params: Set of search parameters. Uses the object-level search params (from __init__) if empty.
        :param format: 'dict' (term -> dataframe) or 'dataframe' (single frame).
        :return: Dataframe with results from API that match parameters.
        """
        # set local parameters to class parameters if necessary
        if not params:
            params = deepcopy(self.params)
        # Check period_length
        if params['period_length'] not in VALID_PERIOD_LENGTHS:
            raise SystemError('Period length {} is of the wrong type.'.format(params['period_length']))
        # Check region type. Because this changes the parameters in the API call, this sets up the API call
        # See the difference between geoRestriction_region, _country, and _dma
        if isinstance(params['region'], list):
            test_region = str(params['region'][0])
            # NOTE(review): str(list) is iterated character-by-character here,
            # so the join produces the list's repr characters quoted, not the
            # region names -- likely meant join(str(r) for r in params['region']).
            params['region'] = "'{}'".format("', '".join(str(params['region'])))
        else:
            test_region = str(params['region'])
        if test_region[:2] == 'US':
            # nation-wide
            if test_region == 'US':
                graph_health = self.service.getTimelinesForHealth(
                    terms=params['search_term'],
                    geoRestriction_country=params['region'],
                    time_startDate=params['period_start'],
                    time_endDate=params['period_end'],
                    timelineResolution=params['period_length']
                )
            # Can only use multiple values for states and DMAs
            # Cannot mix national, state or DMA in the same call, unfortunately
            # Valid options are ISO-3166-2
            else:
                graph_health = self.service.getTimelinesForHealth(
                    terms=params['search_term'],
                    geoRestriction_region=params['region'],
                    time_startDate=params['period_start'],
                    time_endDate=params['period_end'],
                    timelineResolution=params['period_length']
                )
        else:
            # This assumes a DMA
            # To properly retrieve data, it needs to be a number, so test for this first
            # For more, see: https://support.google.com/richmedia/answer/2745487
            if not isinstance(params['region'], int):
                raise ValueError('Region "{}" is not an integer, but looks like it is meant to be a DMA' \
                    .format(params['region']))
            # otherwise
            graph_health = self.service.getTimelinesForHealth(
                terms=params['search_term'],
                geoRestriction_dma=params['region'],
                time_startDate=params['period_start'],
                time_endDate=params['period_end'],
                timelineResolution=params['period_length']
            )
        # Now, finally, call the API
        print('INFO: Running period {} - {}'.format(params['period_start'], params['period_end']))
        response_health = self._perform_pull(graph_health)
        if not response_health:
            return None
        else:
            d_results = {}
            for results in response_health['lines']:
                curr_term = results['term']
                df = pandas.DataFrame(results['points'])
                # re-format date into actual date objects
                # Weekly/daily points use 'Mon DD YYYY'; monthly use 'Mon YYYY'.
                try:
                    df['period'] = pandas.to_datetime(df.date, format='%b %d %Y')
                except:
                    df['period'] = pandas.to_datetime(df.date, format='%b %Y')
                d_results[curr_term] = df
            if format == 'dataframe':
                # process of saving is slightly different when asking for multiple
                # search terms than for just one
                # Need to convert from a dictionary of dataframes
                if len(d_results) > 1:
                    df = pandas.concat(d_results).reset_index()[['level_0', 'date', 'value', 'period']]
                    df = df.rename(columns={'level_0':'search_term'})
                else:
                    df = pandas.DataFrame(d_results)
                return df
            elif format == 'dict':
                return d_results
            else:
                raise ValueError("Please provide a proper format for results. Available options are: dict, dataframe.")
    def _serialize_period_values(self, df, dd_periods=None, lst_periods=None):
        """
        Converts sample into period specific list of values. Assumes dd_periods is a defaultdict
        :param df: Dataframe with sample values. Must at least have the columns [period, value]
        :param dd_periods: A dictionary, with periods as keys and lists of query results as values
        :param lst_periods: A list of valid periods
        :return: dd_periods with added values
        """
        if not lst_periods:
            lst_periods = []
        if not dd_periods:
            dd_periods = defaultdict(list)
        for index, row in df.iterrows():
            # If a list of periods was provided, we only expand dd_periods for the ones that were specified
            if len(lst_periods) > 0:
                if row['period'] in lst_periods:
                    dd_periods[row['period']].append(row['value'])
            else:
                dd_periods[row['period']].append(row['value'])
        return dd_periods
    def pull_rolling_window(self, num_samples=5):
        """
        Separates pull into a rolling set of samples to get multiple samples in the same run.
        This takes advantage of the fact that the API does not cache results if you change the length of time
        in the search
        :param num_samples: Amount of samples to pull
        :return: Dataframe with results from API. Does not include information about the sample frame.
        """
        query_time = datetime.now()
        # First we run a single query, so we can get the dates for each period from the API.
        # Could do this logic locally, but this is easier
        local_params = deepcopy(self.params)
        local_params['search_term'] = local_params['search_term'][0]
        samples_taken = 0
        d_range_all = self.pull_data_from_api(local_params)
        lst_periods = list(d_range_all.values())[0]['period'].tolist()
        d_periods = {}
        # Next, we pull each week individually. This will always get saved.
        print("INFO: Running Search Term: {}".format(self.params['search_term']))
        for period in lst_periods:
            curr_date = datetime.strftime(period, '%Y-%m-%d')
            local_params = deepcopy(self.params)
            local_params['period_start'] = curr_date
            local_params['period_end'] = curr_date
            d_single = self.pull_data_from_api(local_params)
            if not d_single:
                raise ValueError('Problems with period {}'.format(curr_date))
            for term, result in d_single.items():
                if term in d_periods:
                    d_periods[term] = self._serialize_period_values(result, dd_periods=d_periods[term])
                else:
                    d_periods[term] = self._serialize_period_values(result, dd_periods=defaultdict(list))
        # Increment samples taken by 1 - since each period has been sampled individually
        samples_taken += 1
        # Now do the rolling sample
        # Using some logic to figure out the window size and how far back to go
        # First, we get the window size
        window_size = num_samples - samples_taken
        print("INFO: window_size: {}".format(str(window_size)))
        # If in the above samples we've already gotten all that we've asked for, no need to do the rest
        if window_size > 0:
            # There's a weird race condition in which window_size = 1, but we've already done the single period samples
            # So we just change this to a 2 period window size and they get an extra sample
            if window_size == 1:
                window_size = 2
            # Calculate days before and after, erring on the side of having more periods...
            # So that we have symmetry between sides if there are an odd number of weeks
            local_params = deepcopy(self.params)
            days_diff = window_size * 7
            # Get the starting period, specifying that the first window is window_size before the first date
            starting_period = lst_periods[0] - timedelta(days=days_diff)
            # Get the ending period, specifying that the last window is window_size after the last date
            ending_period = lst_periods[-1] + timedelta(days=days_diff)
            # Set up the loop
            # Initial window is (starting_period) to (starting_period + window_size)
            curr_start = starting_period
            curr_end = curr_start + timedelta(days=days_diff)
            # Loop until each window is done
            while curr_end <= ending_period:
                # Set up query params
                local_params['period_start'] = datetime.strftime(curr_start, '%Y-%m-%d')
                local_params['period_end'] = datetime.strftime(curr_end, '%Y-%m-%d')
                # Call the API
                d_window = self.pull_data_from_api(local_params)
                # Save the results
                for term, result in d_window.items():
                    d_periods[term] = self._serialize_period_values(
                        result,
                        dd_periods=d_periods[term],
                        lst_periods=lst_periods
                    )
                # Increment the window by one week
                curr_start += timedelta(days=7)
                curr_end += timedelta(days=7)
        rows = []
        for term, timestamps in d_periods.items():
            for timestamp, samples in timestamps.items():
                for i, sample in enumerate(samples):
                    if i < num_samples:
                        # Due to the sampling method, we sometimes draw an extra sample
                        # This will skip over that
                        rows.append({
                            "term": term,
                            "period": timestamp,
                            "sample": i,
                            "value": sample,
                            "query_time": query_time
                        })
        return pandas.DataFrame(rows)
| 19,216 | 5,095 |
import os
from dotenv import load_dotenv
from Cryptodome.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
from Cryptodome.Signature import PKCS1_v1_5 as Signature_pkcs1_v1_5
from Cryptodome.PublicKey import RSA
# Load environment overrides from a local .env file (no-op if absent).
load_dotenv()
# Comma-separated list of node endpoints; defaults to the public tangle node.
API_URI = os.environ.get("API_URI", "https://nodes.thetangle.org:443").split(",")
API_OPEN = os.environ.get("API_OPEN", "https://nodes.thetangle.org:443")
# encrypt
# Platform's RSA public key: used to encrypt outgoing payloads.
PLAT_RSA_PUB_KEY = RSA.importKey(open("rsa/plat_rsa_public.pem").read())
AMI_CIPHER = Cipher_pkcs1_v1_5.new(PLAT_RSA_PUB_KEY)
# signature
# Our RSA private key: used to sign messages.  NOTE(review): both key files
# are read at import time and the file handles are never explicitly closed.
AMI_RSA_PRI_KEY = RSA.importKey(open("rsa/ami_rsa_private.pem").read())
AMI_SIGNER = Signature_pkcs1_v1_5.new(AMI_RSA_PRI_KEY)
| 659 | 305 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for deleting users."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import lister
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.compute.users import client as users_client
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
class Delete(base.DeleteCommand):
  """Delete Google Compute Engine users.

  *{command}* deletes one or more Google Compute Engine users.

  ## EXAMPLES
  To delete one or more users by name, run:

    $ {command} example-user-1 example-user-2

  To delete all users for one or more owners, run:

    $ {command} example-owner-1@gmail.com example-owner-2@gmail.com --owners
  """
  @staticmethod
  def Args(parser):
    # NOTE(review): this help text describes user *creation*; for this delete
    # command the flag means "treat the NAME arguments as owner emails".
    parser.add_argument(
        '--owners',
        action='store_true',
        help=('The owner of the user to be created. The owner must be an email '
            'address associated with a Google account'))
    parser.add_argument(
        'names',
        metavar='NAME',
        nargs='+',
        help='The names of the users to delete.')
  def GetOwnerAccounts(self, client, owners):
    """Look up all users on the current project owned by the list of owners."""
    requests = []
    for owner in owners:
      # Server-side filter: only user accounts whose owner matches exactly.
      requests += lister.FormatListRequests(
          client.users,
          properties.VALUES.core.project.GetOrFail(), None, None,
          'owner eq ' + owner)
    errors = []
    responses = request_helper.MakeRequests(
        requests=requests,
        http=client.http,
        batch_url='https://www.googleapis.com/batch/',
        errors=errors)
    if errors:
      utils.RaiseException(errors, users_client.UserException, error_message=(
          'Could not get users for owners:'))
    return [response.name for response in responses]
  def Run(self, args):
    """Issues requests necessary for deleting users."""
    holder = base_classes.ComputeUserAccountsApiHolder(self.ReleaseTrack())
    client = holder.client
    # With --owners, NAME arguments are owner emails; resolve them to the
    # actual user-account names first.
    if args.owners:
      names = self.GetOwnerAccounts(client, args.names)
    else:
      names = args.names
    user_refs = [holder.resources.Parse(
        user,
        params={'project': properties.VALUES.core.project.GetOrFail},
        collection='clouduseraccounts.users') for user in names]
    # Interactive confirmation before anything is deleted.
    utils.PromptForDeletion(user_refs)
    requests = []
    for user_ref in user_refs:
      request = client.MESSAGES_MODULE.ClouduseraccountsUsersDeleteRequest(
          project=user_ref.project,
          user=user_ref.Name())
      requests.append((client.users, 'Delete', request))
    errors = []
    responses = list(
        request_helper.MakeRequests(
            requests=requests,
            http=client.http,
            batch_url='https://www.googleapis.com/batch/',
            errors=errors))
    if errors:
      utils.RaiseToolException(
          errors, error_message='Could not fetch resource:')
    return responses
| 3,620 | 1,042 |
from .global_hac import GlobalHAC
| 34 | 12 |
from dataclasses import dataclass
from functools import total_ordering
from collections import Counter
import typing
import textwrap
@dataclass(frozen=True)
@total_ordering
class Point:
    """An immutable 2-D integer lattice point with vector arithmetic."""
    x: int
    y: int
    def __add__(self, they):
        """Component-wise vector addition."""
        return Point(self.x + they.x, self.y + they.y)
    def __sub__(self, they):
        """Component-wise vector subtraction."""
        return Point(self.x - they.x, self.y - they.y)
    def reflect(self):
        """Mirror across the y-axis."""
        return Point(-self.x, self.y)
    def rotate(self):
        """Rotate 90 degrees counter-clockwise about the origin."""
        return Point(-self.y, self.x)
    def __lt__(self, they):
        # Lexicographic order on (x, y); total_ordering derives the rest.
        if self.x != they.x:
            return self.x < they.x
        return self.y < they.y
Poly = typing.Tuple[Point, ...]
def reflect(poly: Poly) -> Poly:
    """Mirror every point of *poly* across the y-axis."""
    return tuple(point.reflect() for point in poly)
def rotate(poly: Poly) -> Poly:
    """Rotate every point of *poly* 90 degrees counter-clockwise."""
    return tuple(point.rotate() for point in poly)
def minimal_repr(poly: Poly) -> Poly:
    """Sort the points and translate so the smallest sits at the origin."""
    ordered = sorted(poly)
    origin = ordered[0]
    return tuple(point - origin for point in ordered)
def normalize(poly: Poly) -> Poly:
    """Canonical form of *poly*: the minimum minimal_repr over all eight
    symmetries (four rotations, each with its reflection)."""
    def symmetries(shape):
        for _ in range(4):
            yield shape
            yield reflect(shape)
            shape = rotate(shape)
    return min(minimal_repr(variant) for variant in symmetries(poly))
def generate_from_poly(poly) -> typing.Generator[Poly, None, None]:
    """Yield normalized polyominoes formed by adding one tile adjacent to
    *poly* (duplicates may be yielded; callers deduplicate)."""
    occupied = set(poly)
    for tile in poly:
        for dx, dy in ((0, 1), (0, -1), (1, 0), (-1, 0)):
            candidate = tile + Point(dx, dy)
            if candidate in occupied:
                continue
            yield normalize((*poly, candidate))
def generate(n: int) -> typing.List[Poly]:
    """Return all free polyominoes of size *n*, as normalized point tuples."""
    if n == 1:
        return [(Point(0, 0),)]
    # Grow every (n-1)-omino by one tile; the set collapses duplicates.
    shapes = set()
    for smaller in generate(n - 1):
        shapes.update(generate_from_poly(smaller))
    return list(shapes)
def hexo_borders(poly: Poly) -> typing.List[typing.Tuple[Point, Point]]:
    """Return the outer border edges of *poly*: unit-square edges that are
    used by exactly one tile (shared edges appear twice and are dropped)."""
    corners = tuple(Point(x, y) for x, y in ((0, 0), (0, 1), (1, 1), (1, 0)))
    edge_count = Counter()
    for tile in poly:
        for idx in range(4):
            c1 = corners[idx]
            c2 = corners[(idx + 1) % 4]
            # Orient each edge consistently (larger corner first) so the two
            # tiles sharing an edge count into the same key.
            if c1 < c2:
                c1, c2 = c2, c1
            edge_count[(tile + c1, tile + c2)] += 1
    return [edge for edge, uses in edge_count.items() if uses == 1]
def hexo_to_repr(poly: Poly) -> str:
    """Render one hexomino as Rust `__Hexo` struct-literal source text."""
    assert len(poly) == 6
    tile_parts = [f'Pos {{ x: {p.x}, y: {p.y} }}' for p in poly]
    border_parts = [
        f'(Pos {{ x: {p1.x}, y: {p1.y} }}, Pos {{ x: {p2.x}, y: {p2.y} }})'
        for (p1, p2) in hexo_borders(poly)
    ]
    tiles_str = ', '.join(tile_parts)
    borders_str = ', '.join(border_parts)
    return (
        f'''__Hexo {{
    tiles: [{tiles_str}],
    borders: &[{borders_str}],
    }}''')
if __name__ == '__main__':
    # Rust source template the generated hexomino table is spliced into.
    codegen_template = textwrap.dedent(
        '''\
        #[cfg(not(test))]
        pub const N_HEXOS: usize = {n_hexos};
        #[cfg(not(test))]
        pub const HEXOS: [__Hexo; {n_hexos}] = [
            {hexos}
        ];
        '''
    )
    # The straight 1x6 bar ("I" hexomino) is excluded from the table.
    I = tuple(Point(0, y) for y in range(6))
    hexos = [poly for poly in generate(6) if poly != I]
    hexos_str = ',\n    '.join(hexo_to_repr(hexo) for hexo in hexos)
    print(codegen_template.format(n_hexos = len(hexos), hexos = hexos_str))
| 3,195 | 1,188 |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype forward reference data submodule.**
This submodule exercises **forward reference type hints** (i.e., strings whose
values are the names of classes and tuples of classes, one or more of which
typically have yet to be defined) support implemented in the
:func:`beartype.beartype` decorator. This support can *only* be fully exercised
from within an independent data submodule rather than the body of a unit test.
Why? Because:
* That decorator is only safely importable from within the body of a unit test.
* Forward reference type hints can only refer to objects defined at module
scope rather than from within the body of a unit test.
* Forward reference type hints referring to objects previously defined at
module scope fail to exercise the deferred nature of forward references.
* Ergo, callables that are decorated by that decorator, annotated by one or
more forward reference type hints, and both declared and called from within
the body of a unit test fail to exercise this deferred nature.
* Ergo, only callables that are decorated by that decorator, annotated by one or
more forward reference type hints, and both declared and called at module
scope before their referents exercise this deferred nature.
'''
# ....................{ IMPORTS }....................
from beartype import beartype
from typing import Union
# ....................{ CALLABLES }....................
# Decorated callable annotated by a PEP-noncompliant fully-qualified forward
# reference referring to a type that has yet to be declared.
# Decorated callable annotated by a PEP-noncompliant fully-qualified forward
# reference referring to a type that has yet to be declared.
TheDarkestForwardRefOfTheYear = (
    'beartype_test.a00_unit.data.hint.data_hintref.TheDarkestEveningOfTheYear')
@beartype
def the_woods_are_lovely(dark_and_deep: TheDarkestForwardRefOfTheYear) -> (
    TheDarkestForwardRefOfTheYear):
    # Identity function; the @beartype wrapper resolves the forward
    # reference lazily on the first call, after the class exists.
    return dark_and_deep
# Decorated callable annotated by a PEP-noncompliant tuple containing both
# standard types and a fully-qualified forward reference referring to a type
# that has yet to be declared.
TheDarkestTupleOfTheYear = (complex, TheDarkestForwardRefOfTheYear, bool)
@beartype
def of_easy_wind(and_downy_flake: TheDarkestTupleOfTheYear) -> (
    TheDarkestTupleOfTheYear):
    # Identity function; exercises forward-reference resolution inside a
    # heterogeneous isinstance()-able tuple annotation.
    return and_downy_flake
# Decorated callable annotated by a PEP-compliant unnested unqualified forward
# reference referring to a type that has yet to be declared.
@beartype
def stopping_by_woods_on(a_snowy_evening: 'TheDarkestEveningOfTheYear') -> (
    'TheDarkestEveningOfTheYear'):
    # Identity function; the unqualified name is resolved against this
    # module's namespace at call time.
    return a_snowy_evening
# Decorated callable annotated by a PEP-compliant nested unqualified forward
# reference referring to a type that has yet to be declared.
TheDarkestUnionOfTheYear = Union[complex, 'TheDarkestEveningOfTheYear', bytes]
@beartype
def but_i_have_promises(to_keep: TheDarkestUnionOfTheYear) -> (
    TheDarkestUnionOfTheYear):
    # Identity function; exercises deferred resolution of a forward
    # reference nested inside a typing.Union.
    return to_keep
# ....................{ CLASSES                           }....................
# User-defined class previously referred to by forward references above.
# Deliberately declared *after* every callable that references it by name.
class TheDarkestEveningOfTheYear(str): pass
| 3,270 | 940 |
# coding: utf-8
# HTTP status code constants, named after Go's net/http package.

# 1xx: informational
StatusContinue = 100
StatusSwitchingProtocols = 101
StatusProcessing = 102
StatusEarlyHints = 103

# 2xx: success
StatusOK = 200
StatusCreated = 201
StatusAccepted = 202
StatusNonAuthoritativeInfo = 203
StatusNoContent = 204
StatusResetContent = 205
StatusPartialContent = 206
StatusMultiStatus = 207
StatusAlreadyReported = 208
StatusIMUsed = 226

# 3xx: redirection
StatusMultipleChoices = 300
StatusMovedPermanently = 301
StatusFound = 302
StatusSeeOther = 303
StatusNotModified = 304
StatusUseProxy = 305
StatusTemporaryRedirect = 307
StatusPermanentRedirect = 308

# 4xx: client errors
StatusBadRequest = 400
StatusUnauthorized = 401
StatusPaymentRequired = 402
StatusForbidden = 403
StatusNotFound = 404
StatusMethodNotAllowed = 405
StatusNotAcceptable = 406
StatusProxyAuthRequired = 407
StatusRequestTimeout = 408
StatusConflict = 409
StatusGone = 410
StatusLengthRequired = 411
StatusPreconditionFailed = 412
StatusRequestEntityTooLarge = 413
StatusRequestURITooLong = 414
StatusUnsupportedMediaType = 415
StatusRequestedRangeNotSatisfiable = 416
StatusExpectationFailed = 417
StatusTeapot = 418
StatusMisdirectedRequest = 421
StatusUnprocessableEntity = 422
StatusLocked = 423
StatusFailedDependency = 424
StatusTooEarly = 425
StatusUpgradeRequired = 426
StatusPreconditionRequired = 428
StatusTooManyRequests = 429
StatusRequestHeaderFieldsTooLarge = 431
StatusUnavailableForLegalReasons = 451

# 5xx: server errors
StatusInternalServerError = 500
StatusNotImplemented = 501
StatusBadGateway = 502
StatusServiceUnavailable = 503
StatusGatewayTimeout = 504
StatusHTTPVersionNotSupported = 505
StatusVariantAlsoNegotiates = 506
StatusInsufficientStorage = 507
StatusLoopDetected = 508
StatusNotExtended = 510
StatusNetworkAuthenticationRequired = 511
| 1,701 | 663 |
import logging
import oscar
# Human-readable name of OSCAR SNAC family 0x0a (used by the framework).
x0a_name="User lookup"
log = logging.getLogger('oscar.snac.x0a')
# Family-specific error subcodes; unknown subcodes are memoized as 'Unknown'
# by x0a_x01 below.
subcodes = {}
def x0a_init(o, sock, cb):
    # Family 0x0a needs no handshake: just report readiness via `cb`.
    log.info('initializing')
    cb()
    log.info('finished initializing')
def x0a_x01(o, sock, data):
    '''
    SNAC (xa, x1): User lookup Family Error
    reference: U{http://iserverd.khstu.ru/oscar/snac_0a_01.html}

    Always raises SnacError describing the server-reported failure.
    '''
    errcode, errmsg, subcode = oscar.snac.error(data)
    # setdefault also caches unseen subcodes in the module-level map.
    submsg = subcodes.setdefault(subcode, 'Unknown') if subcode else None
    raise oscar.snac.SnacError(0x0a, (errcode, errmsg), (subcode, submsg))
def x0a_x02(email):
    '''
    Build the outgoing SNAC(0x0a, 0x02) payload: search users by e-mail.

    reference: U{http://iserverd.khstu.ru/oscar/snac_0a_02.html}
    '''
    family, subtype = 0x0a, 0x02
    return family, subtype, email
def x0a_x03(o, sock, data):
    '''
    SNAC (xa, x3): Search response
    reference: U{http://iserverd.khstu.ru/oscar/snac_0a_03.html}
    '''
    # The payload is a flat TLV list; each TLV value is one screen name.
    fmt = (('tlvs', 'tlv_list'),)
    name_tlvs, data = oscar.unpack(fmt, data)
    assert not data  # the TLV list must consume the entire payload
    # NOTE(review): `names` is built but neither returned nor used here —
    # this function may be truncated in this view; confirm against the
    # full file before relying on it.
    names = [tlv.v for tlv in name_tlvs]
| 1,064 | 469 |
#!/usr/bin/env python
"""setup.py
Defines the setup instructions for the punch framework
Copyright (C) 2016 Rodrigo Chacon
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """setuptools `test` command that delegates to pytest."""
    # Spliced into setup()'s kwargs below so pytest is installed for testing.
    extra_kwargs = {'tests_require': ['pytest']}
    def finalize_options(self):
        # Route discovery through pytest instead of unittest.
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # Imported lazily: pytest is only required when tests actually run.
        import pytest
        sys.exit(pytest.main())
# Convert the Markdown README to reST for PyPI's long_description; fall
# back to an empty string when pypandoc (or the pandoc binary) is missing.
try:
    import pypandoc
    readme = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError, OSError, RuntimeError):
    readme = ''
# Package metadata. NOTE(review): `requires=` is a long-deprecated distutils
# field kept alongside install_requires; confirm it is still intentional.
setup(name='punch',
      version='0.0.1',
      description='A Python framework focused (but not limited) in JSON APIs.',
      long_description=readme,
      author='Rodrigo Chacon',
      author_email='rochacon@gmail.com',
      url='https://github.com/rochacon/punch',
      license='MIT',
      packages=['punch'],
      requires=['webob'],
      install_requires=['webob'],
      cmdclass={'test': PyTest},
      keywords='Web, Python, Python3, Refactoring, REST, Framework, RPC',
      classifiers=['Development Status :: 6 - Mature',
                   'Intended Audience :: Developers',
                   'Natural Language :: English',
                   'Environment :: Console',
                   'License :: OSI Approved :: MIT License',
                   'Programming Language :: Python',
                   'Programming Language :: Python :: 3',
                   'Programming Language :: Python :: 3.2',
                   'Programming Language :: Python :: 3.3',
                   'Programming Language :: Python :: 3.4',
                   'Programming Language :: Python :: 3.5',
                   'Topic :: Software Development :: Libraries',
                   'Topic :: Utilities'],
      **PyTest.extra_kwargs)
| 2,928 | 873 |
'''
Drilling info analysis
This program reads well header data and production logs (e.g. exported from Drilling Info as .csv files) and
walks the user through the generation of decline curves for each well provided in the input data. Decline curves
are fit with a hyperbolic curve that is estimated using an iterative least squares method.
Copyright 2018 Jeffrey E. Thatcher
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
### Boiler-plate imports and code
import sys
sys.path.append('./utils/')
import os, math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from geopy.distance import vincenty
# import tools and custom code
from tools import load_merge_header_and_production_csv, swap_production_dates_for_time_delta
from tools import current_selection, decline_curve, handle_numerical_variables, handle_dateTime_variables
from tools import handle_object_variables, plot_map, fit_decline_curve, add_BOE_per_day_column, nominal_decline
def main(headerCSV, productionCSV):
    """Interactive CLI driver (Python 2): walks the user through well
    selection, decline-curve fitting, mapping, export and per-well plots.
    """
    analysis = Quick_TypeCurve_Analysis(headerCSV, productionCSV)
    print '\n********************************************************************************'
    print '* *'
    print '* Well Type Curve Analysis *'
    print '* *'
    print '* Quit this program anytime by pressing `ctrl+C` *\n'
    print 'reading well header data from: %s' %headerCSV
    print 'reading production data from: %s' %productionCSV
    # select by well number
    print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
    wellByName = raw_input ('would you like to select individual wells by API-UWI number? [y/n]: ')
    # check user input
    while wellByName not in ('y', 'n', 'Y', 'N'):
        wellByName = raw_input('please try again [y/n]? ')
    if wellByName == 'y' or wellByName == 'Y':
        analysis.subset_by_well_name()
    # select nearby wells with a circular radius
    print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
    wellByName = raw_input ('would you like to select wells near a GPS location? [y/n]: ')
    # check user input
    while wellByName not in ('y', 'n', 'Y', 'N'):
        wellByName = raw_input('please try again [y/n]? ')
    if wellByName == 'y' or wellByName == 'Y':
        analysis.subset_wells_by_distance()
    # select by variable ranges
    print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
    wellByVariable = raw_input ('would you like to subset wells by column values? [y/n]: ')
    # check user input
    while wellByVariable not in ('y', 'n', 'Y', 'N'):
        wellByVariable = raw_input('please try again [y/n]? ')
    if wellByVariable == 'y' or wellByVariable == 'Y':
        analysis.subset_well_by_variable()
    # plot type curve for all selected wells
    print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
    b_value = None
    # determine if user wants to pre-specify any of the decline curve parameters
    fixed_b = raw_input ('would you like to pre-specify the decline curve b-factor? [y/n]: ')
    # check user input
    while fixed_b not in ('y', 'n', 'Y', 'N'):
        fixed_b = raw_input('please try again [y/n]? ')
    if fixed_b.upper() == 'Y':
        # loop until the user enters a parseable float
        while True:
            try:
                b_value = float(raw_input('Enter value for b-factor: '))
            except ValueError:
                print 'Please enter a number'
                continue
            else:
                break
    analysis.generate_type_curve(b_value)
    # plot map
    print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
    analysis.map_selected_wells()
    # save csv
    print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
    analysis.save_selected_data()
    # plot wells individually
    print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
    analysis.plot_individual_wells_and_type_curves()
    return
class Quick_TypeCurve_Analysis(object):
    '''
    Type curve analysis based on Jessica's work.
    Decline curve estimates from a python module available at:
    http://www.uky.edu/KGS/emsweb/devsh/production/decline_obj.py

    Holds the merged header+production DataFrame in self.wellDF and
    progressively narrows it via the subset_* methods.
    '''
    def __init__(self, headerCSV, productionCSV):
        # Merged header + production data, with a BOE-per-day column added.
        self.wellDF = load_merge_header_and_production_csv(headerCSV, productionCSV)
        self.wellDF = add_BOE_per_day_column(self.wellDF)
        # [lat, lon] entered by the user in subset_wells_by_distance().
        self.userLocation = []
    def subset_wells_by_distance(self):
        """Keep only wells within a user-given radius of a lat/lon point."""
        # obtain longitude and latitudes from user
        while len(self.userLocation) != 2:
            while True:
                try:
                    self.userLocation = raw_input('\nDefine the center of your radius in Latitude (WGS84), and Longitude (WGS84) (separate by comma): ')
                    self.userLocation = [x.strip() for x in self.userLocation.split(',')]
                    self.userLocation = [float(x) for x in self.userLocation]
                except ValueError:
                    print 'Please enter numbers'
                    continue
                else:
                    break
        # obtain the selection radius from user
        while True:
            try:
                userRadius = float(raw_input('\nDefine the radius within which you will keep all nearby wells (in miles): '))
            except ValueError:
                print 'Please enter numbers'
                continue
            else:
                break
        # add vicinity column to data set (Vincenty distance in miles)
        dist = np.zeros(len(self.wellDF['API/UWI']))
        for i,(lat,lon) in enumerate(zip(self.wellDF['Surface Latitude (WGS84)'], self.wellDF['Surface Longitude (WGS84)'])):
            dist[i] = vincenty([lat, lon], self.userLocation).miles
        self.wellDF['vicinity'] = dist
        # keep only wells within the user selected radius
        self.wellDF = self.wellDF.loc[self.wellDF['vicinity'] <= userRadius]
        # notify user of changes to current selection
        print '%i wells selected' %(len(set(self.wellDF['API/UWI'])))
        return
    def subset_by_well_name(self):
        """Keep only wells whose API/UWI numbers the user picks from a list."""
        allWells = list(set(self.wellDF['API/UWI']))
        print '\nSelect one or more of the followig wells by API/UWI number\n'
        print 'all wells available...'
        for i,well in enumerate(allWells):
            print '%i -- %s' %(i, well)
        selection = raw_input('well selection [separate by commas]:\n')
        selectionList = [x.strip() for x in selection.split(',')]
        self.wellDF = self.wellDF[self.wellDF['API/UWI'].isin(selectionList)]
        current_selection(self.wellDF)
        # notify user of changes to current selection
        print '%i wells selected' %(len(set(self.wellDF['API/UWI'])))
        return
    def subset_well_by_variable(self):
        """Filter the selection by value ranges of user-chosen columns."""
        allVariables = self.wellDF.columns.values
        print '\nSelect one or more of the followig variables\n'
        print 'all variables available...'
        # generate dictionary of variables (index -> column name)
        variableDict = dict()
        for i,var in enumerate(allVariables):
            print '%i -- %s' %(i, var)
            variableDict.update({i:var})
        selectedVars = []
        while len(selectedVars) == 0:
            try:
                selection = raw_input('Select the variables by their number [separate multiple selections by commas]:\n')
                selectionList = [x.strip() for x in selection.split(',')]
                selectedVars = [variableDict.get(int(key)) for key in selectionList]
            except ValueError:
                print 'Please enter variables by their number'
                continue
            else:
                break
        print 'you selected the following variables: '
        print selectedVars
        # dispatch each column to a type-appropriate filter helper
        for colName in selectedVars:
            print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
            print '\nthe variable \"%s\" is of type \"%s\"' %(colName, self.wellDF[colName].dtypes)
            if str(self.wellDF[colName].dtypes) in ['float64', 'int64']:
                self.wellDF = handle_numerical_variables(self.wellDF, colName)
            elif str(self.wellDF[colName].dtypes) in ['object']:
                self.wellDF = handle_object_variables(self.wellDF, colName)
            elif str(self.wellDF[colName].dtypes) in ['datetime64', 'timedelta[ns]','datetime64[ns]']:
                self.wellDF = handle_dateTime_variables(self.wellDF, colName)
            else:
                print 'data type not recognized, skipping variable'
                continue
        # notify user of changes to current selection
        print '%i wells selected' %(len(set(self.wellDF['API/UWI'])))
        return
    def generate_type_curve(self, b_value = None):
        """Fit one decline curve to all selected wells and save the plot.

        b_value: optional fixed hyperbolic b-factor; None lets the fit
        estimate it.
        """
        # get time delta column from selected wells
        self.wellDF = swap_production_dates_for_time_delta(self.wellDF)
        # decline curve estimated parameters
        qi, b, di, r2 = fit_decline_curve(self.wellDF, fixed_b_factor = b_value)
        d_nominal = nominal_decline(qi, b, di)
        # times to estimate for the plot in int(days), sampled every 10 days
        time_0 = 0
        time_n = np.timedelta64(self.wellDF['Time Delta'].max())
        decline_t = np.arange(time_0, time_n, np.timedelta64(10,'D'))
        decline_t = (decline_t / np.timedelta64(1, 'D')).astype(int)
        # estimated decline curve
        decline_y = decline_curve(decline_t, qi, b, di)
        # plot well data (log-scale production vs. days on production)
        fig, ax = plt.subplots(figsize = (15,8))
        for API in set(self.wellDF['API/UWI']):
            plotData = self.wellDF.loc[self.wellDF['API/UWI'] == API, ['Time Delta', 'BOE per day']]
            days = plotData['Time Delta'].dt.days
            liquid = np.array(plotData['BOE per day'])
            ax.semilogy(days, liquid, '-', label = API)
        # add decline estimate
        ax.plot(decline_t, decline_y, '-', color='black', linewidth=5.0, label = 'Estimated Decline')
        # set axis limits (padded on both ends)
        xmin = (self.wellDF['Time Delta'].min() / np.timedelta64(1, 'D')).astype(int)
        xmin = xmin*0.15
        xmax = (self.wellDF['Time Delta'].max() / np.timedelta64(1, 'D')).astype(int)
        xmax = xmax*1.06
        ax.set_xlim([xmin, xmax])
        # add titles and legend
        ax.set_xlabel('Time [Days]')
        ax.set_ylabel('BOE per Day\n[Barrels of Oil Equivalent per Day]')
        ax.set_title('Decline Curve Parameters: qi=%.2f, b=%.4f, nominal decline rate=%.1f, r2=%.3f' %(qi, b, d_nominal, r2))
        num_col = math.ceil(len(set(self.wellDF['API/UWI']))/40.0) # number of columns to put in legend
        num_col = int(num_col)
        ax.legend(bbox_to_anchor=(1.26, 0.9), ncol = num_col, fontsize = 9-num_col, labelspacing=0.2)
        # Customize the major grid
        ax.grid(which='major', linestyle='-', linewidth='0.5', color='grey')
        # Customize the minor grid
        ax.grid(which='minor', linestyle=':', linewidth='0.5', color='grey')
        # eliminate unnecessary white space
        plt.subplots_adjust(left=0.07, right=0.8, top=0.9, bottom=0.1)
        # save and display plot
        plt.savefig('./results/Average_decline_estimate.png')
        plt.close()
        return
    def map_selected_wells(self):
        """Render a map of the current selection (and the user point if set)."""
        print 'generating map, this may take a minute...'
        # send data to mapping function
        if not(self.userLocation):
            plot_map(self.wellDF)
        else:
            plot_map(self.wellDF, self.userLocation)
        return
    def save_selected_data(self):
        """Write the currently selected wells to ./results/selected_wells.csv."""
        print 'saving selected wells to .csv'
        self.wellDF.to_csv('./results/selected_wells.csv')
        return
    def plot_individual_wells_and_type_curves(self):
        """Fit and plot a decline curve per well; save parameters to CSV."""
        print 'generating plots for all selected wells'
        # get time delta column from selected wells
        self.wellDF = swap_production_dates_for_time_delta(self.wellDF)
        declineFit = []
        for well in np.unique(self.wellDF['API/UWI']):
            print 'fitting well # %s' %(str(well))
            wellData = self.wellDF[self.wellDF['API/UWI'] == well]
            # decline curve estimated parameters
            qi, b, di, r2 = fit_decline_curve(wellData)
            # compute Nominal decline
            d_nominal = nominal_decline(qi, b, di)
            # add data to list for saving to excel
            # NOTE(review): the first element is the whole DataFrame slice,
            # yet the output column is labeled 'API/UWI' — confirm intended.
            declineFit.append([wellData, qi, b, d_nominal, di, r2])
            # times to estimate for the plot in int(days)
            time_0 = 0
            time_n = np.timedelta64(wellData['Time Delta'].max())
            decline_t = np.arange(time_0, time_n, np.timedelta64(10,'D'))
            decline_t = (decline_t / np.timedelta64(1, 'D')).astype(int)
            # estimated decline curve
            decline_y = decline_curve(decline_t, qi, b, di)
            # plot well data
            fig, ax = plt.subplots(figsize = (15,8))
            days = wellData['Time Delta'].dt.days
            liquid = np.array(wellData['BOE per day'])
            ax.semilogy(days, liquid, 'o-', label = well)
            # add decline estimate
            ax.plot(decline_t, decline_y, '-', color='black', linewidth=5.0, label = 'Estimated Decline')
            # set axis limits
            xmin = (wellData['Time Delta'].min() / np.timedelta64(1, 'D')).astype(int)
            xmin = xmin*0.15
            xmax = (wellData['Time Delta'].max() / np.timedelta64(1, 'D')).astype(int)
            xmax = xmax*1.06
            ax.set_xlim([xmin, xmax])
            # add titles and legend
            ax.set_xlabel('Time [Days]')
            ax.set_ylabel('BOE per Day\n[Barrels of Oil Equivalent per Day]')
            ax.set_title('Decline Curve Parameters: qi=%.2f, b=%.4f, nominal decline rate=%.1f, r2=%.3f' %(qi, b, d_nominal, r2))
            ax.legend(bbox_to_anchor=(1.28, 1.05))
            # Customize the major grid
            ax.grid(which='major', linestyle='-', linewidth='0.5', color='grey')
            # Customize the minor grid
            ax.grid(which='minor', linestyle=':', linewidth='0.5', color='grey')
            # eliminate unnecessary white space
            plt.subplots_adjust(left=0.07, right=0.8, top=0.9, bottom=0.1)
            # save and display plot
            plt.savefig('./results/' + str(well) + '_decline_estimate.png')
            plt.close()
        declineFitDF = pd.DataFrame(declineFit, columns = ['API/UWI', 'qi', 'b', 'nominal decline rate', 'effective decline rate[di]', 'r2'])
        declineFitDF.to_csv('./results/individual_well_decline_curves.csv')
        return
if __name__ == '__main__':
    ### well data files (hard-coded default input locations)
    headerCSV = './data/Well_header_data.csv'
    productionCSV = './data/Production_Time_Series.CSV'
    main(headerCSV, productionCSV)
| 13,664 | 5,103 |
#!/usr/bin/env python
import os
import re
import unittest
from git import Repo
from semver import match
from click import option, argument, echo, ClickException
from touchresume.cli import cli
from touchresume import __version__
@cli.command(with_appcontext=False)
@option('-d', '--dir', default='tests', help='Directory with tests')
def test(dir):
    """Discover and run unit tests."""
    # NOTE: `dir` shadows the builtin, but the name is fixed by the CLI option.
    testsuite = unittest.TestLoader().discover(dir)
    unittest.TextTestRunner(verbosity=2, buffer=True).run(testsuite)
@cli.command(with_appcontext=False)
@option('-d', '--dev', default='dev', help='Develop branch (dev)')
@option('-m', '--master', default='master', help='Master branch (master)')
@argument('version')
def release(dev, master, version, app_path='touchresume'):
    """Make Git release.

    Git-flow style: branch release/{version} off dev, bump __version__,
    commit changelog/readme edits, merge to master, tag, merge back to dev.
    """
    # Refuse anything that is not a strictly newer semver.
    if not match(version, f'>{__version__}'):
        raise ClickException(f'Version must be greater than {__version__}')
    repo = Repo()
    release = f'release/{version}'
    echo(f'Create {release} branch')
    repo.head.ref = repo.heads[dev]
    repo.head.ref = repo.create_head(release)
    echo(f'Bump version - {version}')
    # Rewrite the __version__ line in the package's __init__.py in place.
    version_file = os.path.join(app_path, '__init__.py')
    with open(version_file, 'r+') as f:
        content = f.read()
        target = f"__version__ = '{__version__}'"
        value = f"__version__ = '{version}'"
        f.seek(0)
        f.write(content.replace(target, value))
    repo.index.add([version_file])
    repo.index.commit(f'bump version - v{version}')
    # Sweep up any uncommitted changelog edits in the working tree.
    diff = repo.head.commit.diff(None)
    cf = re.compile(r'^change[s|log].*')
    changelog_files = [d.a_path for d in diff if cf.match(d.a_path.lower())]
    if changelog_files:
        echo(f'Commit {", ".join(changelog_files)}')
        repo.index.add(changelog_files)
        repo.index.commit(f'update changelog - v{version}')
    # Same for README files.
    rf = 'readme'
    readme_files = [d.a_path for d in diff if d.a_path.lower().startswith(rf)]
    if readme_files:
        echo(f'Commit {", ".join(readme_files)}')
        repo.index.add(readme_files)
        repo.index.commit(f'update readme - v{version}')
    echo(f'Merge {release} into {master}')
    repo.head.ref = repo.heads[master]
    parents = (repo.branches[release].commit, repo.branches[master].commit)
    repo.index.commit(f'merge {release}', parent_commits=parents)
    echo(f'Create v{version} tag')
    repo.create_tag(f'v{version}')
    echo(f'Merge {release} back into {dev}')
    repo.head.ref = repo.heads[dev]
    dev_parents = (repo.branches[release].commit, repo.branches[dev].commit)
    repo.index.commit(f'merge {release} back', parent_commits=dev_parents)
    echo(f'Delete {release} branch')
    repo.delete_head(release)
if __name__ == '__main__':
    # Delegate to the shared click command group.
    cli()
| 2,745 | 956 |
from reb.src import pynyt
from reb.conf import APIKEY_NYT_ARTICLE
# Clients for the NYT Article Search and Archive APIs (shared API key).
nyt = pynyt.ArticleSearch(APIKEY_NYT_ARTICLE)
nytArchive = pynyt.ArchiveApi(APIKEY_NYT_ARTICLE)
# Example of an Article Search query, kept for reference:
# # get 1000 news articles from the Foreign newsdesk from 1987
# results_obama = nyt.query(
#     q='obama',
#     begin_date="20170101",
#     end_date="20170102",
#     # facet_field=['source', 'day_of_week'],
#     # facet_filter = True,
#     verbose=True)
# Fetch the full archive for January 2012.
arch = nytArchive.query(
    year="2012",
    month="1"
)
"""A file system service for managing CVMFS-based client file systems."""
import os
from cm.services import service_states
import logging
log = logging.getLogger('cloudman')
class CVMFS(object):
    """File-system driver for a CVMFS-backed mount managed by CloudMan."""

    def __init__(self, filesystem, fs_type):
        self.fs = filesystem   # Owning file system service object
        self.fs_type = fs_type
        self.app = self.fs.app  # Local app handle (used by @TestFlag)

    def __str__(self):
        return str(self.fs.mount_point)

    def __repr__(self):
        return str(self.fs.mount_point)

    def _get_details(self, details):
        """Annotate *details* with static facts about this file system."""
        details.update({'DoT': "No", 'kind': self.fs_type})
        return details

    def start(self):
        """
        Start the service.

        CVMFS mounts lazily, so simply touching the mount point by
        listing it is enough to bring it up.
        """
        os.listdir(self.fs.mount_point)

    def stop(self):
        """No teardown is required for CVMFS."""
        pass

    def status(self):
        """Mark the service RUNNING if the mount point has content, ERROR if not."""
        if not os.listdir(self.fs.mount_point):
            self.fs.state = service_states.ERROR
            return
        self.fs.state = service_states.RUNNING
        update_size_cmd = ("df --block-size 1 | grep %s$ | awk "
                           "'{print $2, $3, $5}'" % self.fs.mount_point)
        self.fs._update_size(cmd=update_size_cmd)
| 1,403 | 428 |
# -*- coding: utf-8 -*-
import re, web, datetime, hashlib, struct, yaml, sys, wikipedia
import xml.etree.ElementTree as ET
# Two signed integers separated by spaces and/or a comma.
re_NUMERIC = re.compile("(-?\d+)[ ,]+(-?\d+)")
re_NUMERICF = re.compile("(-?[\.\d]+)[ ,]+(-?[\.\d]+)") #fractions allowed
# Wiki expedition links of the form [[yyyy-mm-dd lat lon ...]].
re_EXPEDITION = re.compile('\[\[(\d{4}-\d{2}-\d{2} -?\d+ -?\d+)')
def getdjia(date):
    """Fetch the Dow Jones opening value for *date* from geo.crox.net.

    Returns the raw response string, or None when the request fails.
    """
    try:
        return web.get("http://geo.crox.net/djia/%s" % date)
    except Exception:
        # BUG FIX: the previous bare `except:` also swallowed SystemExit
        # and KeyboardInterrupt; catch only genuine errors.
        return None
def geohash(djia, date):
    # xkcd-426 geohash: md5 of "date-djia"; the two 64-bit halves of the
    # digest become the fractional parts of latitude and longitude.
    # NOTE(review): hashing a str works only on Python 2 (this script is
    # Python 2); Python 3 would require an explicit .encode().
    sum = hashlib.md5("%s-%s" % (date, djia)).digest()
    lat, lon = [x/2.**64 for x in struct.unpack_from(">QQ", sum)];
    return lat, lon
def exp2latlon(expstring): #parameter: a string in the expedition format yyyy-mm-dd lll lll
    # Returns (lat, lon) strings for the expedition, or None when the DJIA
    # value cannot be retrieved. NOTE: Python 2 only — the literal
    # datetime.date(2008,05,26) is a SyntaxError on Python 3.
    sdate, lat, lon = expstring.split()
    date = datetime.datetime.strptime(sdate,"%Y-%m-%d").date()
    # The "30W rule": east of 30°W after 2008-05-26, the previous day's
    # DJIA is used.
    if int(lon)>-30 and date > datetime.date(2008,05,26):
        date = date - datetime.timedelta(1) #use the previous day for W30
    djia = getdjia(date)
    if not djia:
        print ("Could not retrieve DJIA for %s." % date)
        return
    if "not available" in djia:
        print ("DJIA for %s not available yet." % date)
        return
    if "error" in djia:
        print ("crox reported an error while retrieving DJIA for %s." % date)
        return
    flat, flon = geohash(djia, sdate)
    # Graft the hash's fractional digits onto the graticule's integer part.
    geolat = lat + str(flat)[1:]
    geolon = lon + str(flon)[1:]
    return geolat, geolon
def geolookup(lat, lon):
et = None
result = {}
try:
bytes = web.get("http://ws.geonames.org/extendedFindNearby?lat=%s&lng=%s" % (lat,lon))
et = ET.fromstring(bytes)
except:
phenny.msg(renick, "geonames.org web services seem to be unavailable.")
return
s = ""
for item in et:
if item.tag in ["geoname","ocean"]:
ifcode = item.find("fcode")
if ifcode!=None:
result[ifcode.text] = item.find("name").text
elif item.tag in ["country","continent"]:
result[item.tag] = item.text
elif item.tag == "address":
result["streetNumber"] = item.find("streetNumber").text
result["street"] = item.find("street").text
result["postalcode"] = item.find("postalcode").text
result["placename"] = item.find("placename").text
result["adminName2"] = item.find("adminName2").text
result["adminName1"] = item.find("adminName1").text
result["countryCode"] = item.find("countryCode").text
else:
print "Unhandled tag: %s" % item.tag
return result
# --- script body: tally per-region graticule visits for one wiki user ---
site = wikipedia.getSite()
if len(sys.argv)>1:
    user = sys.argv[1]
else:
    print "usage:\n./regional username"
    sys.exit(1)
# Pull the user's wiki page and extract all expedition links from it.
page = wikipedia.Page(site, "User:"+user)
data = page.get()
expeditions = re_EXPEDITION.findall(data)
# regionals maps feature code -> place name -> {graticule: True}.
regionals = {}
count = 0
for exp in expeditions:
    date, glat, glon = exp.split()
    # NOTE(review): exp2latlon and geolookup both return None on failure,
    # which would make the unpack / .iteritems() below raise — confirm the
    # inputs are always resolvable or add guards.
    lat, lon = exp2latlon(exp)
    place = geolookup(lat,lon)
    print place
    for fcode, name in place.iteritems():
        if fcode:
            if not fcode in regionals:
                regionals[fcode]={}
            if not name in regionals[fcode]:
                regionals[fcode][name]={}
            regionals[fcode][name][glat+" "+glon]=True
# Report each region; list individual graticules once more than 3 visited.
for fcode, names in regionals.iteritems():
    for name, grats in names.iteritems():
        num = len(grats)
        print "%s %s - %i graticules" % (fcode, name, num)
        if num>3:
            for grat in grats:
                print grat+";",
            print
| 3,240 | 1,229 |
def nearest_smallest_element(arr):
    """
    Given an array arr, return for each element the nearest element to its
    left that is strictly smaller, or -1 when no such element exists.
    """
    stack = []   # strictly increasing values seen so far (left to right)
    result = []
    for value in arr:
        # Discard candidates that are not strictly smaller than `value`;
        # they can never be the answer for this or any later element.
        while stack and stack[-1] >= value:
            stack.pop()
        result.append(stack[-1] if stack else -1)
        stack.append(value)
    return result
def test_nearest_smallest_element():
    """Table-driven checks covering a general case, a singleton and empty input."""
    cases = [
        ([4, 5, 2, 10, 12, 11], [-1, 4, -1, 2, 10, 10]),
        ([4], [-1]),
        ([], []),
    ]
    for arr, expected in cases:
        assert nearest_smallest_element(arr) == expected
| 1,034 | 320 |
# -*- coding: utf-8 -*-
'''
Relations for BIRD.
'''
import socket
import netaddr
import netifaces
from charmhelpers.core import hookenv
from charmhelpers.core.services.helpers import RelationContext
def router_id():
    '''
    Determine the router ID that should be used.

    Collects the IPv4 addresses assigned on all interfaces and returns
    the numerically lowest one outside the 127.0.0.0/8 loopback block
    (None when no such address exists).
    '''
    loopback_net = netaddr.IPNetwork('127.0.0.0/8')
    candidates = []
    for interface in netifaces.interfaces():
        ip4_entries = netifaces.ifaddresses(interface).get(netifaces.AF_INET, [])
        for entry in ip4_entries:
            candidates.append(netaddr.IPAddress(entry['addr']))
    for addr in sorted(candidates):
        if addr not in loopback_net:
            return str(addr)
def resolve_domain_name(name, ip_version=4):
    '''
    Takes a domain name and resolves it to an IP address
    of a given version.

    Returns the first matching address as a string, or '' when the name
    does not resolve to any address of that version.
    '''
    results = socket.getaddrinfo(name, None)
    addresses = (netaddr.IPAddress(r[4][0]) for r in results)
    filtered = (a for a in addresses if a.version == ip_version)
    # BUG FIX: generator.next() exists only on Python 2; the next()
    # builtin (with a default, replacing the StopIteration handler)
    # works on both Python 2.6+ and Python 3.
    return str(next(filtered, ''))
def local_ipv6_address():
    '''
    Determine the IPv6 address to use to contact this machine.

    Skips link-local and loopback addresses and returns the first
    remaining IPv6 address found (None if there is none).
    '''
    for iface in netifaces.interfaces():
        ip6_entries = netifaces.ifaddresses(iface).get(netifaces.AF_INET6, [])
        for entry in ip6_entries:
            # Strip any '%scope' interface specifier before parsing.
            candidate = netaddr.IPAddress(entry['addr'].split('%')[0])
            if candidate.is_link_local() or candidate.is_loopback():
                continue
            return str(candidate)
class BgpRRRelation(RelationContext):
    '''
    Relation context for the BGP Route Reflector interface.

    Gathers peer addresses from all related units so templates can render
    BIRD BGP sessions.
    '''
    name = 'bgp-route-reflector'
    interface = 'bgp-route-reflector'
    required_keys = []
    def is_ready(self):
        # Always ready: peer data is optional for rendering configuration.
        return True
    def _is_ready(self, data):
        # A unit's data is complete once it publishes both address keys.
        return set(data.keys()).issuperset(set(['addr', 'addr6']))
    def get_data(self):
        """Collect IPv4/IPv6 peer addresses from every related unit."""
        peers = []
        peers6 = []
        for rid in hookenv.relation_ids(self.name):
            for unit in hookenv.related_units(rid):
                rel = hookenv.relation_get(attribute='addr',
                                           rid=rid,
                                           unit=unit)
                if rel is not None:
                    # 'addr' may be a hostname; resolve it to IPv4.
                    addr = resolve_domain_name(rel)
                    if addr:
                        peers.append(addr)
                rel6 = hookenv.relation_get(attribute='addr6',
                                            rid=rid,
                                            unit=unit)
                if rel6 is not None:
                    peers6.append(rel6)
        self['bgp_peers'] = peers
        self['bgp_peers6'] = peers6
        self['router_id'] = router_id()
        return
    def provide_data(self):
        # Addresses advertised to the remote side of the relation.
        return {
            'addr': hookenv.unit_get('private-address'),
            'addr6': local_ipv6_address()
        }
| 3,385 | 1,015 |
from hashlib import sha1
from django.core.cache import cache
from django.utils.encoding import smart_str
def cached(key=None, timeout=300):
    """
    Cache the result of function call.

    Args:
        key: the key with which value will be saved. If key is None
            then it is calculated automatically from the function's
            module, name and arguments.
        timeout: number of seconds after which the cached value would be purged.
    """
    _key = key

    def func_wrapper(func):
        def args_wrapper(*args, **kwargs):
            # Rebind locally so we never clobber the shared `_key` cell.
            key = _key
            if key is None:
                # BUG FIX: kwargs were previously hashed via frozenset(),
                # whose iteration order is not stable across processes, so
                # identical calls could produce different cache keys; sort
                # the items instead. Also encode to bytes — sha1() rejects
                # str on Python 3.
                raw = (str(func.__module__) + str(func.__name__) +
                       smart_str(args) +
                       smart_str(sorted(kwargs.items())))
                key = sha1(raw.encode('utf-8')).hexdigest()
            value = cache.get(key)
            # BUG FIX: the old `if value:` test treated cached falsy results
            # (0, '', [], False) as misses and recomputed them on every call.
            if value is None:
                value = func(*args, **kwargs)
                # BUG FIX: `timeout` was accepted but never forwarded to
                # cache.set(), so entries always used the backend default.
                cache.set(key, value, timeout)
            return value
        return args_wrapper
    return func_wrapper
| 1,136 | 287 |
#!/usr/bin/env python3
"""
1. Go to:
https://usage.dteenergy.com/?interval=hour
2. Download CSV
3. Run:
python dtecsv.py .\electric_usage_report_05-31-2021_to_06-05-2021.csv
"""
import csv
import datetime
import click
import matplotlib.pyplot as plt
# Module-level series accumulated by main(): x holds formatted timestamps,
# y holds the corresponding hourly usage readings.
x = []
y = []


@click.command()
@click.argument('file', type=click.Path(exists=True))
def main(file):
    """Plot hourly usage data from a DTE Energy CSV export.

    :param file: path to the downloaded DTE CSV file
    """
    with open(file, 'r') as handle:
        reader = csv.reader(handle)
        next(reader)  # skip the header row
        for record in reader:
            # Columns 1 and 2 hold date and hour, e.g. "05/15/2021 11:00 AM".
            stamp = record[1] + ' ' + record[2]
            label = datetime.datetime.strptime(stamp, "%m/%d/%Y %I:00 %p").strftime("%b %d %H:00")
            x.append(label)
            y.append(float(record[3]))

    # Resize the figure (optional) and draw the series.
    plt.figure(figsize=(18, 9))
    plt.plot(x, y)
    # Show every second timestamp, rotated so the labels stay readable.
    plt.xticks(x[::2], rotation='vertical')
    # Add margins (padding) so that markers don't get clipped by the axes.
    plt.margins(0.2)
    plt.show()


if __name__ == '__main__':
    main()
| 1,417 | 527 |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import sys
from distutils.version import LooseVersion
from pathlib import Path
from typing import List, Optional
import yaml
from model_navigator.converter import DatasetProfileConfig
from model_navigator.exceptions import ModelNavigatorProfileException
from model_navigator.kubernetes.yaml import CustomDumper
from model_navigator.model_analyzer import ModelAnalyzer, ModelAnalyzerProfileConfig
from model_navigator.model_analyzer.config import BaseConfigGenerator, ModelAnalyzerTritonConfig
from model_navigator.model_analyzer.model_analyzer import ModelAnalyzerMode
from model_navigator.model_analyzer.model_analyzer_config import ModelAnalyzerConfig
from model_navigator.perf_analyzer import PerfMeasurementConfig
from model_navigator.triton import DeviceKind
from model_navigator.triton.model_config import TritonModelConfigGenerator
from model_navigator.triton.utils import get_shape_params
from model_navigator.utils import Workspace
LOGGER = logging.getLogger(__name__)
# Resolve the installed triton-model-analyzer version once, using the
# importlib.metadata API where available (Python 3.8+) and falling back to
# pkg_resources on older interpreters.
# Bug fix: the previous check compared LooseVersion(sys.version) against
# "3.8.0", but sys.version embeds build metadata (e.g. "3.8.10 (default, ...)")
# which LooseVersion can mis-compare; sys.version_info is the reliable check.
if sys.version_info >= (3, 8):
    from importlib.metadata import version

    TRITON_MODEL_ANALYZER_VERSION = LooseVersion(version("triton-model-analyzer"))
else:
    import pkg_resources

    TRITON_MODEL_ANALYZER_VERSION = LooseVersion(pkg_resources.get_distribution("triton-model-analyzer").version)
class Profiler:
    """Drives a Triton Model Analyzer profiling run over a model repository.

    Builds the analyzer profile configuration, launches the analyzer in
    PROFILE mode and returns the path of the newest checkpoint produced.
    """

    def __init__(
        self,
        *,
        workspace: Workspace,
        triton_docker_image: str,
        gpus: List[str],
        verbose: bool = False,
        profile_config: ModelAnalyzerProfileConfig,
        triton_config: ModelAnalyzerTritonConfig,
        perf_measurement_config: PerfMeasurementConfig,
        dataset_profile_config: Optional[DatasetProfileConfig] = None,
        profiling_data_path: Optional[Path] = None,
    ):
        self._workspace = workspace
        self._triton_config = triton_config
        self._triton_docker_image = triton_docker_image
        self._profile_config = profile_config
        self._dataset_profile_config = dataset_profile_config
        self._profiling_data_path = profiling_data_path
        self._perf_measurement_config = perf_measurement_config
        # Delegate config-file construction to the generator; it also owns
        # the analyzer workspace layout (paths used below).
        self._config_generator: ProfileConfigGenerator = ProfileConfigGenerator(
            workspace=self._workspace,
            profile_config=self._profile_config,
            triton_config=triton_config,
            triton_docker_image=triton_docker_image,
            verbose=verbose,
            dataset_profile_config=dataset_profile_config,
            profiling_data_path=profiling_data_path,
            perf_measurement_config=perf_measurement_config,
            gpus=gpus,
        )
        self._profile_config_path = self._config_generator.analyzer_path / "config-profile.yaml"
        self._verbose = verbose
        self._prepare_catalogs()

    def run(self) -> Path:
        """Write the profile config, run the analyzer, return newest checkpoint path."""
        config = self._config_generator.generate_config()
        self._profile_config_path.parent.mkdir(parents=True, exist_ok=True)
        with self._profile_config_path.open("w") as config_file:
            serialized = yaml.dump(config, Dumper=CustomDumper)
            LOGGER.debug("Triton Model Analyzer profile config:\n" f"{serialized}")
            config_file.write(serialized)

        analyzer_config = ModelAnalyzerConfig()
        analyzer_config["config-file"] = self._profile_config_path.as_posix()
        ModelAnalyzer(config=analyzer_config).run(mode=ModelAnalyzerMode.PROFILE, verbose=self._verbose)

        newest_checkpoint = self._find_latest_checkpoint()
        LOGGER.info(f"Triton Model Analyzer profiling done. Results are stored in {newest_checkpoint}")
        return newest_checkpoint

    def _find_latest_checkpoint(self):
        # Checkpoints are named "<number>.ckpt"; the highest number is newest.
        # Returns None when the analyzer produced no checkpoints.
        candidates = list(self._config_generator.checkpoints_dir_path.glob("*.ckpt"))
        if not candidates:
            return None
        return max(candidates, key=lambda path: int(path.stem))

    def _prepare_catalogs(self):
        # Start every profiling run from a clean analyzer directory.
        analyzer_dir = self._config_generator.analyzer_path
        if analyzer_dir.is_dir():
            LOGGER.debug(f"Removing {analyzer_dir}")
            shutil.rmtree(analyzer_dir)
        analyzer_dir.mkdir(parents=True)
class ProfileConfigGenerator(BaseConfigGenerator):
    """Builds the Triton Model Analyzer profile configuration dictionary.

    The generated dict is what gets serialized to ``config-profile.yaml`` and
    fed to Model Analyzer; several keys change format depending on the
    installed triton-model-analyzer version.
    """

    def __init__(
        self,
        *,
        workspace: Workspace,
        profile_config: ModelAnalyzerProfileConfig,
        triton_config: ModelAnalyzerTritonConfig,
        perf_measurement_config: PerfMeasurementConfig,
        gpus: List[str],
        triton_docker_image: Optional[str] = None,
        verbose: int = 0,
        dataset_profile_config: Optional[DatasetProfileConfig] = None,
        profiling_data_path: Optional[Path] = None,
    ):
        super().__init__(workspace=workspace, verbose=verbose)
        # Triton server output is captured next to the other analyzer files.
        self._analyzer_triton_log_path = self._analyzer_path / "triton.log"
        self._triton_config = triton_config
        self._triton_docker_image = triton_docker_image
        self._verbose = verbose
        self._profile_config = profile_config
        self._dataset_profile_config = dataset_profile_config
        self._profiling_data_path = profiling_data_path
        self._perf_measurement_config = perf_measurement_config
        self._gpus = gpus

    @property
    def triton_log_path(self) -> Path:
        """Absolute path of the Triton server log file for this run."""
        return self._analyzer_triton_log_path.resolve()

    def generate_config(self):
        """Assemble and return the Model Analyzer profile config dict.

        Models are discovered as the subdirectories of the model repository.
        When any model has per-model search parameters, ``profile_models``
        becomes a mapping {model_name: per-model config}; otherwise it stays
        a plain list of names.
        """
        model_repository = self._triton_config.model_repository
        models_list = [model_dir.name for model_dir in model_repository.glob("*") if model_dir.is_dir()]
        LOGGER.info(f"Prepare profiling for {len(models_list)} models from {model_repository}:")
        for model_name in models_list:
            LOGGER.info(f"\t- {model_name}")
        model_names_with_profile_config = {
            model_name: self._get_profile_config_for_model(model_name) for model_name in models_list
        }
        # Switch to the dict form only if at least one model has a non-empty
        # per-model configuration.
        if any(profile_config for model_name, profile_config in model_names_with_profile_config.items()):
            models_list = model_names_with_profile_config
        if self._profile_config.config_search_max_preferred_batch_size > 0:
            max_preferred_batch_size = self._profile_config.config_search_max_preferred_batch_size
        else:
            max_preferred_batch_size = 1
        # Disable automatic search only when *every* model carries explicit
        # model_config_parameters (i.e. a fully manual sweep was requested).
        manual_config_search = all(
            isinstance(models_list, dict) and models_list[model_name].get("model_config_parameters")
            for model_name in models_list
        )
        # https://github.com/triton-inference-server/model_analyzer/blob/r21.12/docs/config.md
        config = {
            "run_config_search_disable": manual_config_search,
            "profile_models": models_list,
            "triton_docker_image": self._triton_docker_image,
            "triton_launch_mode": self._triton_config.triton_launch_mode.value,
            "model_repository": model_repository.resolve().as_posix(),
            "checkpoint_directory": self._analyzer_checkpoints_dir_path.as_posix(),
            "output_model_repository_path": self.output_model_repository_path.as_posix(),
            "export_path": self._analyzer_path.resolve().as_posix(),
            "triton_server_flags": {"strict-model-config": False},
            "run_config_search_max_concurrency": self._profile_config.config_search_max_concurrency,
            "run_config_search_max_instance_count": self._profile_config.config_search_max_instance_count,
            "run_config_search_max_preferred_batch_size": max_preferred_batch_size,
            "perf_analyzer_timeout": self._perf_measurement_config.perf_analyzer_timeout,
            "perf_analyzer_flags": self._get_perf_analyzer_flags(),
            "triton_server_path": self._triton_config.triton_server_path,
            "override_output_model_repository": True,
            "gpus": list(self._gpus),
            "summarize": self._verbose,
            "verbose": self._verbose,
            "perf_output": self._verbose,
            "triton_output_path": self.triton_log_path.as_posix(),
        }
        return config

    def _get_perf_analyzer_flags(self):
        """Build the perf_analyzer flag dict (format varies with MA version).

        Model Analyzer >= 1.8.0 accepts lists for input-data/shape; earlier
        versions expect single/space-joined string values.
        """
        configuration = {}

        if self._profiling_data_path:
            if TRITON_MODEL_ANALYZER_VERSION >= LooseVersion("1.8.0"):
                configuration["input-data"] = [self._profiling_data_path.as_posix()]
            else:
                configuration["input-data"] = self._profiling_data_path.as_posix()
        elif self._dataset_profile_config and self._dataset_profile_config.max_shapes:
            shapes = get_shape_params(self._dataset_profile_config)
            if TRITON_MODEL_ANALYZER_VERSION >= LooseVersion("1.8.0"):
                configuration["shape"] = shapes
            else:
                configuration["shape"] = " ".join(shapes)

        configuration["measurement-interval"] = self._perf_measurement_config.perf_measurement_interval
        configuration["measurement-mode"] = self._perf_measurement_config.perf_measurement_mode
        configuration["measurement-request-count"] = self._perf_measurement_config.perf_measurement_request_count

        return configuration

    def _get_profile_config_for_model(self, model_dir_name):
        """Return the manual-search config for one model (may be empty).

        Translates the user's search parameters (instance counts, batch
        sizes, backend parameters, concurrency) into the Model Analyzer
        per-model configuration structure, merging backend parameters on top
        of those already present in the model's config.pbtxt.
        """
        original_model_config_path = self._triton_config.model_repository / model_dir_name / "config.pbtxt"
        original_model_config = TritonModelConfigGenerator.parse_triton_config_pbtxt(original_model_config_path)

        model_config = {}

        if self._profile_config.config_search_instance_counts:
            mapping = {DeviceKind.GPU: "KIND_GPU", DeviceKind.CPU: "KIND_CPU"}
            model_config["instance_group"] = [
                {"kind": mapping[kind], "count": counts}
                for kind, counts in self._profile_config.config_search_instance_counts.items()
            ]

        if self._profile_config.config_search_max_batch_sizes:
            model_config["max_batch_size"] = self._profile_config.config_search_max_batch_sizes

        if self._profile_config.config_search_preferred_batch_sizes:
            model_config["dynamic_batching"] = {
                "preferred_batch_size": self._profile_config.config_search_preferred_batch_sizes
            }

        if self._profile_config.config_search_backend_parameters:
            # Keep the parameters already defined in config.pbtxt and overlay
            # the user-provided sweep values on top of them.
            original_backend_parameters = original_model_config.backend_parameters_config.triton_backend_parameters
            original_backend_parameters = {
                param_name: {"string_value": [param_value]}
                for param_name, param_value in original_backend_parameters.items()
            }
            model_config["parameters"] = {
                **original_backend_parameters,
                **{
                    param_name: {"string_value": list(map(str, param_values))}
                    for param_name, param_values in self._profile_config.config_search_backend_parameters.items()
                },
            }

        configuration = {}
        if model_config:
            configuration["model_config_parameters"] = model_config

        if self._profile_config.config_search_concurrency:
            configuration["parameters"] = {"concurrency": self._profile_config.config_search_concurrency}

        engine_count_per_device = original_model_config.instances_config.engine_count_per_device
        if self._profile_config.config_search_max_instance_count and engine_count_per_device:
            # Automatic instance-count search only supports one device kind.
            if len(set(engine_count_per_device)) > 1:
                raise ModelNavigatorProfileException(
                    "Triton Model config instance group have more than 1 device kind. "
                    "Use manual profile to swipe over instance group count"
                )
            elif DeviceKind.CPU in engine_count_per_device:
                configuration["cpu_only"] = True

        return configuration
| 12,598 | 3,677 |
# The following comments couldn't be translated into the new config version:
# Test storing OtherThing as well
# Configuration file for PrePoolInputTest
import FWCore.ParameterSet.Config as cms

# Second-stage test process: reads the OtherThing products from the first
# stage's output file, re-runs OtherThingProducer, and writes everything
# except Thing products to a new ROOT file.
process = cms.Process("TEST2ND")
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")

#process.maxEvents = cms.untracked.PSet(
#    input = cms.untracked.int32(11)
#)

#process.Thing = cms.EDProducer("ThingProducer")

# Drop Thing products on output; keep everything else.
process.output = cms.OutputModule("PoolOutputModule",
    outputCommands = cms.untracked.vstring('keep *',
        'drop *_Thing_*_*'),
    fileName = cms.untracked.string('PoolInput2FileTest.root')
)

process.OtherThing = cms.EDProducer("OtherThingProducer")

process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring("file:PoolInputOther.root") )

process.p = cms.Path(process.OtherThing)
process.ep = cms.EndPath(process.output)
| 906 | 311 |
import datetime
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import casadi as cas
##### For viewing the videos in Jupyter Notebook
import io
import base64
from IPython.display import HTML
# from ..</src> import car_plotting
# from .import src.car_plotting
# Absolute project root; appended to sys.path so the src.* packages resolve.
PROJECT_PATH = '/home/nbuckman/Dropbox (MIT)/DRL/2020_01_cooperative_mpc/mpc-multiple-vehicles/'
sys.path.append(PROJECT_PATH)
import src.MPC_Casadi as mpc
import src.car_plotting as cplot
import src.TrafficWorld as tw
np.set_printoptions(precision=2)
import src.IterativeBestResponseMPCMultiple as mibr
import pickle

SAVE = False  # write figures to disk when True
PLOT = False  # show figures interactively when True

rounds_ibr = 225   # maximum number of iterative-best-response rounds per sim
n_other_cars = 4   # number of non-ambulance vehicles per simulation
N = 50             # MPC horizon length (timesteps per saved trajectory)

###### LATEX Dimensions (Not currently Working)
fig_width_pt = 246.0  # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0/72.27               # Convert pt to inches
golden_mean = (np.sqrt(5)-1.0)/2.0         # Aesthetic ratio
fig_width = fig_width_pt*inches_per_pt  # width in inches
fig_height =fig_width*golden_mean      # height in inches
fig_size = [fig_width,fig_height]
# NOTE: this immediately overrides the LaTeX-derived size above.
fig_size = [6, 4]
def find_t_final(x, goal_x):
i_upper = np.searchsorted(x[0,:], goal_x)
i_lower = i_upper - 1
dt = 0.2
# if i_upper >= x.shape[1]:
# print(i_upper, x[0,i_lower])
# print("Check: %.03f < %.03f"%(x[0,i_lower], goal_x))
t_lower = i_lower*dt
x_lower = x[0, i_lower]
x_remaining = goal_x - x_lower
v_x = np.cos(x[2, i_lower]) * x[4, i_lower]
t_remaining = x_remaining / v_x
t_final = t_lower + t_remaining
# print("%.03f %.03f"%(t_lower, t_final))
return t_final
#### STEP 1: Sort all the files into the correct SVO
# Every simulation result directory under results/ that should be classified
# by its social value orientation (SVO) angle theta_iamb.
all_subdir = [
    "20200301_215332random_ego",
    "20200301_215346random_pro",
    "20200301_215432random_altru",
    "20200301_215520random_pro",
    "20200301_215526random_altru",
    "20200301_215537random_ego",
    "20200301_215551random_pro",
    "20200301_215602random_altru",
    "20200301_215608random_ego",
    "20200301_215623random_pro",
    "20200301_215629random_altru",
    "20200301_215636random_ego",
    "20200301_215652random_pro",
    "20200301_215658random_altru",
    "20200301_215703random_ego",
    "20200301_215713random_pro",
    "20200301_215724random_altru",
    "20200301_215742random_ego",
    "20200301_215751random_pro",
    "20200301_215757random_altru",
    "20200301_215806random_ego",
    "20200302_104840random_1p",
    "20200302_104913random_2p",
    "20200302_104916random_3p",
    "20200302_104920random_4p",
    "20200302_104926random_1e",
    "20200302_104941random_2e",
    "20200302_104946random_3e",
    "20200302_105002random_4e",
    "20200302_105059random_1a",
    "20200302_105101random_2a",
    "20200302_105104random_3a",
    "20200302_105108random_4a",
    "20200302_114834random_5e",
    "20200302_114839random_6e",
    "20200302_114841random_7e",
    "20200302_114844random_8e",
    "20200302_114853random_5p",
    "20200302_114856random_6p",
    "20200302_114859random_7p",
    "20200302_114902random_8p",
    "20200302_114909random_5a",
    "20200302_114912random_6a",
    "20200302_114914random_7a",
    "20200302_114916random_8a",
    "20200227_133704less_kxdotlarger",
    "20200228_114359random_pro",
    "20200228_114437random_pro",
    "20200228_114440random_pro",
    "20200228_114443random_pro",
    "20200228_114448random_pro",
    "20200228_114450random_pro",
    "20200228_114913random_pro",
    "20200228_114914random_pro",
    "20200228_114916random_pro",
    "20200228_114917random_pro",
    "20200227_142916pi_01_ego",
    "20200228_114517random_ego",
    "20200228_114518random_ego",
    "20200228_114528random_ego",
    "20200228_114532random_ego",
    "20200228_114547random_ego",
    "20200228_114551random_ego",
    "20200228_114803random_ego",
    "20200228_114805random_ego",
    "20200228_114806random_ego",
    "20200227_141954pi2_5altru",
    "20200228_114501random_altru",
    "20200228_114503random_altru",
    "20200228_114505random_altru",
    "20200228_114506random_altru",
    "20200228_114507random_altru",
    "20200228_114509random_altru",
    "20200228_114850random_altru",
    "20200228_114851random_altru",
    "20200228_114852random_altru",
]

# Directories and their SVO angles, bucketed by orientation.
subdir_name_prosocial_list = []
subdir_name_ego_list = []
subdir_name_altruistic_list = []
altr_theta = []
ego_theta = []
pro_theta = []

# When True, discard runs where the ambulance drove onto the grass
# (min_y at/below the world's lower bound).
NO_GRASS = False
world = tw.TrafficWorld(2, 0, 1000)

# Classify each run by its SVO angle; skip runs with corrupted y-bounds or
# runs whose grass behavior conflicts with the NO_GRASS setting.
# NOTE: this rebinds the module alias `mpc` (src.MPC_Casadi) to the loaded
# pickle object — confirm nothing below still needs the module.
for subdir in all_subdir:
    try:
        file_name = "results/" + subdir+"/data/"+"mpc3.p"
        mpc = pickle.load(open(file_name,'rb'))
        if mpc.min_y < -999999 or mpc.max_y > 9999999:
            print("Messed up ymin/max", file_name)
            continue
        elif mpc.min_y > world.y_min + 0.000001:
            print("Grass is NOT allowed!", file_name)
            if not NO_GRASS:
                print("Too grass lmmited, ignored", file_name)
                continue
        elif mpc.min_y <= world.y_min + 0.00001:
            print("Grass is allowed!", file_name)
            if NO_GRASS:
                print("NO Grass, dataset ignored", file_name)
                continue
        # Bucket by SVO angle: > pi/3 altruistic, <= pi/6 egoistic,
        # otherwise prosocial.
        if mpc.theta_iamb > np.pi/3:
            subdir_name_altruistic_list += [subdir]
            altr_theta += [mpc.theta_iamb]
        elif mpc.theta_iamb <= np.pi/6.0:
            subdir_name_ego_list += [subdir]
            ego_theta += [mpc.theta_iamb]
        else:
            subdir_name_prosocial_list += [subdir]
            pro_theta += [mpc.theta_iamb]
    except FileNotFoundError:
        print("Not found:", file_name)

print("Atruistic np.pi/2 = 1.5ish")
print(subdir_name_altruistic_list)
print(altr_theta)
print("Egoistic 0")
print(subdir_name_ego_list)
print(ego_theta)
print("Pro-Social", np.pi/2)
print(subdir_name_prosocial_list)
print(pro_theta)
# subdir_name_prosocial_list = [
# "20200227_133704less_kxdotlarger",
# "20200228_114359random_pro",
# "20200228_114437random_pro",
# "20200228_114440random_pro",
# "20200228_114443random_pro",
# "20200228_114448random_pro",
# "20200228_114450random_pro",
# "20200228_114913random_pro",
# "20200228_114914random_pro",
# "20200228_114916random_pro",
# "20200228_114917random_pro",
# ]
# subdir_name_prosocial = "20200227_133704less_kxdotlarger"
# folder_prosocial = "results/" + subdir_name_prosocial + "/"
# subdir_name_ego_list = [
# "20200227_142916pi_01_ego",
# "20200228_114517random_ego",
# "20200228_114518random_ego",
# "20200228_114528random_ego",
# "20200228_114532random_ego",
# "20200228_114547random_ego",
# "20200228_114551random_ego",
# "20200228_114803random_ego",
# "20200228_114805random_ego",
# "20200228_114806random_ego",
# ]
# subdir_name_ego = "20200227_142916pi_01_ego"
# folder_ego = "results/" + subdir_name_ego + "/"
# subdir_name_altruistic_list = [
# "20200227_141954pi2_5altru",
# "20200228_114501random_altru",
# "20200228_114503random_altru",
# "20200228_114505random_altru",
# "20200228_114506random_altru",
# "20200228_114507random_altru",
# "20200228_114509random_altru",
# "20200228_114850random_altru",
# "20200228_114851random_altru",
# "20200228_114852random_altru"]
# subdir_name_altruistic = "20200227_141954pi2_5altru"
# folder_altruistic = "results/" + subdir_name_altruistic + "/"
################ Analyze Results
# Per-orientation accumulators: each element corresponds to one simulation
# directory that had at least one slack-eligible IBR round.
all_xamb_pro = []
all_uamb_pro = []
all_other_x_pro = []
all_other_u_pro = []
ibr_brounds_array_pro = []
all_xamb_ego = []
all_uamb_ego = []
all_other_x_ego = []
all_other_u_ego = []
ibr_brounds_array_ego = []
all_xamb_altru = []
all_uamb_altru = []
all_other_x_altru = []
all_other_u_altru = []
ibr_brounds_array_altru = []
all_tfinalamb_pro = []
all_tfinalamb_ego = []
all_tfinalamb_altru = []

# Load every saved IBR round for each simulation, keeping only the
# ambulance-planning rounds after slack activation.
for sim_i in range(3):
    if sim_i==0:
        subdir_name_list = subdir_name_prosocial_list
    elif sim_i==1:
        subdir_name_list = subdir_name_ego_list
    else:
        subdir_name_list = subdir_name_altruistic_list
    for folder in subdir_name_list:
        n_full_rounds = 0 # rounds that the ambulance planned
        n_all_rounds = 0
        # Pre-allocate for the maximum number of rounds; clipped below.
        all_xamb = np.zeros((6, N+1, rounds_ibr))
        all_uamb = np.zeros((2, N, rounds_ibr))
        all_xcost = np.zeros((3, rounds_ibr))
        all_tfinalamb = np.zeros((1, rounds_ibr))
        all_other_x = [np.zeros((6, N+1, rounds_ibr)) for i in range(n_other_cars)]
        all_other_u = [np.zeros((2, N, rounds_ibr)) for i in range(n_other_cars)]
        all_other_cost = [np.zeros((3, rounds_ibr)) for i in range(n_other_cars)]
        all_other_tfinal = [np.zeros((1, rounds_ibr)) for i in range(n_other_cars)]
        for amb_ibr_i in range(rounds_ibr):
            # The ambulance plans every (n_other_cars + 1)-th round; only
            # rounds after index 51 are considered (slack activated).
            if (amb_ibr_i % (n_other_cars + 1) == 1) and amb_ibr_i>51: # We only look at sims when slack activated
                ibr_prefix = '%03d'%amb_ibr_i
                try:
                    xamb, uamb, xamb_des, xothers, uothers, xothers_des = mibr.load_state("results/" + folder + "/" + "data/" + ibr_prefix, n_other_cars)
                    all_xamb[:,:,n_full_rounds] = xamb
                    all_uamb[:,:,n_full_rounds] = uamb
                    x_goal = 130
                    all_tfinalamb[:, n_full_rounds] = find_t_final(xamb, x_goal)
                    for i in range(n_other_cars):
                        all_other_x[i][:,:,n_full_rounds] = xothers[i]
                        all_other_u[i][:,:,n_full_rounds] = uothers[i]
                        # all_other_tfinal[i][:,n_full_rounds] = find_t_final(xothers[i], 120)
                    n_full_rounds += 1
                except FileNotFoundError:
                    # print("amb_ibr_i %d missing"%amb_ibr_i)
                    pass
                n_all_rounds += 1
        ### Clip the extra dimension
        all_xamb = all_xamb[:,:,:n_full_rounds]
        all_uamb = all_uamb[:,:,:n_full_rounds]
        all_tfinalamb = all_tfinalamb[:,:n_full_rounds]
        for i in range(n_other_cars):
            all_other_x[i] = all_other_x[i][:,:,:n_full_rounds]
            all_other_u[i] = all_other_u[i][:,:,:n_full_rounds]
        ibr_brounds_array = np.array(range(1, n_full_rounds +1))
        if n_full_rounds > 0 : # only save those that meet slack requirement
            if sim_i==0: #prosocial directory
                all_xamb_pro += [all_xamb]
                all_uamb_pro += [all_uamb]
                all_other_x_pro += [all_other_x]
                all_other_u_pro += [all_other_u]
                ibr_brounds_array_pro += [ibr_brounds_array]
                all_tfinalamb_pro += [all_tfinalamb]
            elif sim_i==1: #egoistic directory
                all_xamb_ego += [all_xamb]
                all_uamb_ego += [all_uamb]
                all_other_x_ego += [all_other_x]
                all_other_u_ego += [all_other_u]
                ibr_brounds_array_ego += [ibr_brounds_array]
                all_tfinalamb_ego += [all_tfinalamb]
            else: #altruistic directory
                all_xamb_altru += [all_xamb]
                all_uamb_altru += [all_uamb]
                all_other_x_altru += [all_other_x]
                all_other_u_altru += [all_other_u]
                ibr_brounds_array_altru += [ibr_brounds_array]
                all_tfinalamb_altru += [all_tfinalamb]
        else:
            print("No slack eligible", folder)
### SAVING IN PROSOCIAL'S DIRECTORy
folder = "random" #<----

# Figure 1: final ambulance trajectory (x-y) for one run of each SVO.
fig_trajectory, ax_trajectory = plt.subplots(1,1)
ax_trajectory.set_title("Ambulance Trajectories")
# fig_trajectory.set_figheight(fig_height)
# fig_trajectory.set_figwidth(fig_width)
fig_trajectory.set_size_inches((8,6))
print(len(all_xamb_pro))
print(all_xamb_pro[0].shape)
# Index [0][...,-1]: first simulation of each bucket, last IBR round.
ax_trajectory.plot(all_xamb_pro[0][0,:,-1], all_xamb_pro[0][1,:,-1], '-o', label="Prosocial")
ax_trajectory.plot(all_xamb_ego[0][0,:,-1], all_xamb_ego[0][1,:,-1], '-o', label="Egoistic")
ax_trajectory.plot(all_xamb_altru[0][0,:,-1], all_xamb_altru[0][1,:,-1], '-o', label="Altruistic")
ax_trajectory.set_xlabel("X [m]")
ax_trajectory.set_ylabel("Y [m]")
if SAVE:
    fig_file_name = folder + 'plots/' + 'cfig1_amb_trajectory.eps'
    fig_trajectory.savefig(fig_file_name, dpi=95, format='eps')
    print("Save to....", fig_file_name)

##########################################333333
# Figure 2: mean squared control effort (steering / velocity) per SVO.
svo_labels = ["Egoistic", "Prosocial", "Altruistic"]
fig_uamb, ax_uamb = plt.subplots(3,1)
fig_uamb.set_size_inches((8,8))
fig_uamb.suptitle("Ambulance Control Input over IBR Iterations")
# ax_uamb[0].plot(ibr_brounds_array, np.sum(all_uamb[0,:,:] * all_uamb[0,:,:], axis=0), '-o')
ax_uamb[0].bar(range(3), [
    np.mean([np.sum(all_x[0,:,-1] * all_x[0,:,-1],axis=0) for all_x in all_uamb_ego]),
    np.mean([np.sum(all_x[0,:,-1] * all_x[0,:,-1],axis=0) for all_x in all_uamb_pro]),
    np.mean([np.sum(all_x[0,:,-1] * all_x[0,:,-1],axis=0) for all_x in all_uamb_altru])]
)
# ax_uamb[0].set_xlabel("IBR Iteration")
ax_uamb[0].set_ylabel(r"$\sum u_{\delta}^2$")
ax_uamb[0].set_xticks(range(3))
ax_uamb[0].set_xticklabels(svo_labels)
ax_uamb[1].bar(range(3), [
    np.mean([np.sum(all_x[1,:,-1] * all_x[1,:,-1],axis=0) for all_x in all_uamb_ego]),
    np.mean([np.sum(all_x[1,:,-1] * all_x[1,:,-1],axis=0) for all_x in all_uamb_pro]),
    np.mean([np.sum(all_x[1,:,-1] * all_x[1,:,-1],axis=0) for all_x in all_uamb_altru])]
)
# ax_uamb[1].set_xlabel("IBR Iteration")
ax_uamb[1].set_ylabel(r"$\sum u_{v}^2$")
ax_uamb[1].set_xticks(range(3))
ax_uamb[1].set_xticklabels(svo_labels)
# ax_uamb[2].bar(range(3), [
#     np.sum(all_uamb_ego[0,:,-1] * all_uamb_ego[0,:,-1],axis=0) + np.sum(all_uamb_ego[1,:,-1] * all_uamb_ego[1,:,-1],axis=0),
#     np.sum(all_uamb_pro[0,:,-1] * all_uamb_pro[1,:,-1], axis=0) + np.sum(all_uamb_pro[1,:,-1] * all_uamb_pro[1,:,-1], axis=0),
#     np.sum(all_uamb_altru[0,:,-1] * all_uamb_altru[0,:,-1],axis=0) + np.sum(all_uamb_altru[1,:,-1] * all_uamb_altru[1,:,-1],axis=0)],)
# ax_uamb[2].set_xlabel("Vehicles' Social Value Orientation")
# ax_uamb[2].set_ylabel("$\sum ||u||^2$")
ax_uamb[1].set_xticks(range(3))
ax_uamb[1].set_xticklabels(svo_labels)
if SAVE:
    fig_file_name = folder + 'plots/' + 'cfig2_amb_ctrl_iterations.eps'
    fig_uamb.savefig(fig_file_name, dpi=95, format='eps')
    print("Save to....", fig_file_name)

##########################################################
#### Convergence
#########################################################
# Figure 3: change in control input between consecutive IBR rounds, as a
# proxy for convergence of the best-response iterations.
fig_reluamb, ax_reluamb = plt.subplots(2,1)
# fig_reluamb.set_figheight(fig_height)
# fig_reluamb.set_figwidth(fig_width)
fig_reluamb.set_size_inches((8,6))
for sim_i in range(3):
    if sim_i==0: #prosocial directory
        all_uamb = all_uamb_ego
        label = "Egoistic"
        ibr_brounds_array = ibr_brounds_array_ego
    elif sim_i==1: #egoistic directory
        all_uamb = all_uamb_pro
        label = "Prosocial"
        ibr_brounds_array = ibr_brounds_array_pro
    else: #altruistic directory
        all_uamb = all_uamb_altru
        all_other_u = all_other_u_altru
        label = "Altruistic"
        ibr_brounds_array = ibr_brounds_array_altru
    ax_reluamb[0].plot(ibr_brounds_array[0][1:], np.sum((all_uamb[0][0,:,1:]-all_uamb[0][0,:,0:-1])*(all_uamb[0][0,:,1:]-all_uamb[0][0,:,0:-1]), axis=0), '-o', label=label)
    ax_reluamb[1].plot(ibr_brounds_array[0][1:], np.sum((all_uamb[0][1,:,1:]-all_uamb[0][1,:,0:-1])*(all_uamb[0][1,:,1:]-all_uamb[0][1,:,0:-1]), axis=0), '-o', label=label)
ax_reluamb[0].set_ylabel("$\sum (u_{v\delta,t}-u_{\delta,t-1})^2$")
ax_reluamb[1].set_xlabel("IBR Iteration")
ax_reluamb[1].set_ylabel("$\sum (u_{v,t}-u_{v,t-1})^2$")
ax_reluamb[0].legend()
ax_reluamb[1].legend()
fig_reluamb.suptitle("Change in Ambulance Control Input over IBR Iterations")
if SAVE:
    fig_file_name = folder + 'plots/' + 'cfig3_change_amb_ctrl_iterations.eps'
    fig_reluamb.savefig(fig_file_name, dpi=95, format='eps')
    print("Save to....", fig_file_name)

###################################################################3
##################################################################
# Figure 4: final position and heading of the ambulance across iterations.
fig_xfinal, ax_xfinal = plt.subplots(2,1)
fig_xfinal.suptitle("Final Ambulance State Over Iterations")
fig_xfinal.set_size_inches((8,6))
# fig_xfinal.set_figheight(fig_height)
# fig_xfinal.set_figwidth(fig_width)
for sim_i in range(3):
    if sim_i==0: #prosocial directory
        all_uamb = all_uamb_ego
        all_xamb = all_xamb_ego
        all_other_x = all_other_x_ego
        label = "Egoistic"
        ibr_brounds_array = ibr_brounds_array_ego
    elif sim_i==1: #egoistic directory
        all_uamb = all_uamb_pro
        all_xamb = all_xamb_pro
        all_other_x = all_other_x_pro
        label = "Prosocial"
        ibr_brounds_array = ibr_brounds_array_pro
    else: #altruistic directory
        all_uamb = all_uamb_altru
        all_xamb = all_xamb_altru
        all_other_x = all_other_x_altru
        all_other_u = all_other_u_altru
        label = "Altruistic"
        ibr_brounds_array = ibr_brounds_array_altru
    ax_xfinal[0].plot(ibr_brounds_array[0], all_xamb[0][0,-1,:], '-o', label=label)
    ax_xfinal[1].plot(ibr_brounds_array[0], all_xamb[0][2,-1,:], '-o', label=label)
# ax_reluamb[0].set_xlabel("IBR Iteration")
ax_xfinal[0].set_ylabel("$x_{final}$")
ax_xfinal[0].legend()
ax_xfinal[1].set_xlabel("IBR Iteration")
ax_xfinal[1].set_ylabel(r"$\Theta_{final}$")
ax_xfinal[1].legend()
if SAVE:
    fig_file_name = folder + 'plots/' + 'cfig4_iterations_ambperformance.eps'
    fig_xfinal.savefig(fig_file_name, dpi=95, format='eps')
    print("Save to....", fig_file_name)

################################################################################
###################### NOW PLOTTING THE OTHER VEHICLES #########################
# Figure 5: per-vehicle comparison (bar plots mostly commented out).
fig_xfinal_all, ax_xfinal_all = plt.subplots(3,1)
fig_xfinal_all.suptitle("Comparing Distance Travel for the Vehicles")
fig_xfinal_all.set_size_inches((8,8))
# fig_xfinal_all.set_figheight(fig_height)
# fig_xfinal_all.set_figwidth(fig_width)
for sim_i in range(3):
    if sim_i==0: #prosocial directory
        all_uamb = all_uamb_ego
        all_xamb = all_xamb_ego
        all_other_x = all_other_x_ego
        label = "Egoistic"
        ibr_brounds_array = ibr_brounds_array_ego
    elif sim_i==1: #egoistic directory
        all_uamb = all_uamb_pro
        all_xamb = all_xamb_pro
        all_other_x = all_other_x_pro
        label = "Prosocial"
        ibr_brounds_array = ibr_brounds_array_pro
    else: #altruistic directory
        all_uamb = all_uamb_altru
        all_xamb = all_xamb_altru
        all_other_x = all_other_x_altru
        all_other_u = all_other_u_altru
        label = "Altruistic"
        ibr_brounds_array = ibr_brounds_array_altru
    bar_width = 0.5
    inter_car_width = 2*bar_width
    width_offset = bar_width*sim_i
    ticks = [width_offset + (2*bar_width + inter_car_width)*c for c in range(n_other_cars + 1)]
    # print(len(all_ither_x))
    # ax_xfinal_all[0].bar(ticks,
    #     [np.mean([all_x[0, -1, -1] - all_x[0, 0, -1] for all_x in all_xamb])] + [np.mean(all_o_x[i][0,-1,-1] - all_o_x[i][0,0,-1]) for i in range(n_other_cars) for all_o_x in all_other_x],
    #     bar_width, label=label)
    # ax_xfinal_all[0].set_xticks(range(n_other_cars + 1))
    # ax_xfinal_all[0].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
    # ax_xfinal_all[1].bar(ticks,
    #     [all_xamb[-1, -1, -1] - all_xamb[-1, 0, -1]] + [all_other_x[i][-1,-1,-1] - all_other_x[i][-1,0,-1] for i in range(n_other_cars)],
    #     bar_width, label=label)
    # # ax_xfinal_all[1].set_xticks(range(n_other_cars + 1))
    # # ax_xfinal_all[1].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
    # ax_xfinal_all[2].bar(ticks,
    #     [np.sum(all_xamb[2,:,-1]*all_xamb[2,:,-1])] + [np.sum(all_other_x[i][2,:,-1]*all_other_x[i][2,:,-1]) for i in range(n_other_cars)],
    #     bar_width, label=label)
# Recompute ticks at the middle offset for the shared axis labels.
width_offset = bar_width*1
ticks = [width_offset + (2*bar_width + inter_car_width)*c for c in range(n_other_cars + 1)]
ax_xfinal_all[2].legend()
ax_xfinal_all[2].set_xticks(ticks)
ax_xfinal_all[2].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
ax_xfinal_all[0].set_ylabel("Horizontal Displacement $\Delta x$")
ax_xfinal_all[0].legend()
ax_xfinal_all[0].set_xticks(ticks)
ax_xfinal_all[0].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
ax_xfinal_all[1].set_ylabel("Total Distance $s_f - s_i$")
ax_xfinal_all[1].legend()
ax_xfinal_all[1].set_xticks(ticks)
ax_xfinal_all[1].set_xticklabels(["A"] + [str(i) for i in range(1, n_other_cars+1)])
ax_xfinal_all[2].set_ylabel("Angular Deviation $\sum_{t} \Theta_t^2$")
if SAVE:
    fig_file_name = folder + 'plots/' + 'cfig5_vehicles_comparison.eps'
    fig_xfinal_all.savefig(fig_file_name, dpi=95, format='eps')
    print("Save to....", fig_file_name)

#########################Let's Reproduce the Table ####################33
# Summary tables (LaTeX row format): ambulance final x, time-to-goal, and
# vehicle-1 displacement, per SVO bucket.
print("Amb X Final  Avg.  Min.  Max. ")
final_metric_ego = [all_x[0,-1,-1] for all_x in all_xamb_ego]
final_metric_pro = [all_x[0,-1,-1] for all_x in all_xamb_pro]
final_metric_altru = [all_x[0,-1,-1] for all_x in all_xamb_altru]
# print("Egoistic & %.02f & %.02f & %.02f & %.02f"%(all_xamb_ego[0,-1,-1], np.mean(all_xamb_ego[0,-1,:]), np.min(all_xamb_ego[0,-1,:]), np.max(all_xamb_ego[0,-1,:])))
# print("Prosocial & %.02f & %.02f & %.02f & %.02f"%(all_xamb_pro[0,-1,-1], np.mean(all_xamb_pro[0,-1,:]), np.min(all_xamb_pro[0,-1,:]), np.max(all_xamb_pro[0,-1,:])))
# print("Altruistic & %.02f & %.02f & %.02f & %.02f"%(all_xamb_altru[0,-1,-1], np.mean(all_xamb_altru[0,-1,:]), np.min(all_xamb_altru[0,-1,:]), np.max(all_xamb_altru[0,-1,:])))
print("Egoistic & %.02f (%.02f) & %.02f & %.02f"%(np.mean(final_metric_ego), np.std(final_metric_ego), np.min(final_metric_ego), np.max(final_metric_ego)))
print("Prosocial & %.02f (%.02f) & %.02f & %.02f"%(np.mean(final_metric_pro), np.std(final_metric_pro), np.min(final_metric_pro), np.max(final_metric_pro)))
print("Altruistic & %.02f (%.02f) & %.02f & %.02f"%(np.mean(final_metric_altru), np.std(final_metric_altru), np.min(final_metric_altru), np.max(final_metric_altru)))
final_metric_ego = [t_final[:,-1] for t_final in all_tfinalamb_ego]
final_metric_pro = [t_final[:,-1] for t_final in all_tfinalamb_pro]
final_metric_altru = [t_final[:,-1] for t_final in all_tfinalamb_altru]
# print(all_tfinalamb_ego[0].shape)
# print(final_metric_ego)
# print(final_metric_ego.shape)
# print("Egoistic & %.02f & %.02f & %.02f & %.02f"%(all_xamb_ego[0,-1,-1], np.mean(all_xamb_ego[0,-1,:]), np.min(all_xamb_ego[0,-1,:]), np.max(all_xamb_ego[0,-1,:])))
# print("Prosocial & %.02f & %.02f & %.02f & %.02f"%(all_xamb_pro[0,-1,-1], np.mean(all_xamb_pro[0,-1,:]), np.min(all_xamb_pro[0,-1,:]), np.max(all_xamb_pro[0,-1,:])))
# print("Altruistic & %.02f & %.02f & %.02f & %.02f"%(all_xamb_altru[0,-1,-1], np.mean(all_xamb_altru[0,-1,:]), np.min(all_xamb_altru[0,-1,:]), np.max(all_xamb_altru[0,-1,:])))
# NOTE: x_goal leaks out of the analysis loop above — it is only defined if
# at least one round loaded successfully.
print("Time To "+str(x_goal)+"m")
print("Egoistic & %.02f (%.02f) & %.02f & %.02f %d"%(np.mean(final_metric_ego), np.std(final_metric_ego), np.min(final_metric_ego), np.max(final_metric_ego),len(final_metric_ego)))
print("Prosocial & %.02f (%.02f) & %.02f & %.02f %d"%(np.mean(final_metric_pro), np.std(final_metric_pro), np.min(final_metric_pro), np.max(final_metric_pro),len(final_metric_pro)))
print("Altruistic & %.02f (%.02f) & %.02f & %.02f %d"%(np.mean(final_metric_altru), np.std(final_metric_altru), np.min(final_metric_altru), np.max(final_metric_altru),len(final_metric_altru)))
print("Veh 1 Final  Avg.  Min.  Max. ")
i = 0
veh_displace_ego = [all_other_x[i][0,-1,-1] - all_other_x[i][0,0,-1] for all_other_x in all_other_x_ego]
veh_displace_pro = [all_other_x[i][0,-1,-1] - all_other_x[i][0,0,-1] for all_other_x in all_other_x_pro]
veh_displace_altru = [all_other_x[i][0,-1,-1] - all_other_x[i][0,0,-1] for all_other_x in all_other_x_altru]
print(" ")
print("Egoistic & %.02f (%.02f) & %.02f & %.02f"%(np.mean(veh_displace_ego), np.std(veh_displace_ego), np.min(veh_displace_ego), np.max(veh_displace_ego)))
print("Prosocial & %.02f (%.02f) & %.02f & %.02f "%(np.mean(veh_displace_pro), np.std(veh_displace_pro), np.min(veh_displace_pro), np.max(veh_displace_pro)))
print("Altruistic & %.02f (%.02f) & %.02f & %.02f "%( np.mean(veh_displace_altru), np.std(veh_displace_altru), np.min(veh_displace_altru), np.max(veh_displace_altru)))
if PLOT:
    plt.show()
| 24,165 | 11,427 |
import numpy as np
from tensorflow.keras.callbacks import TensorBoard
import cv2
import sys
import threading
import keras
from keras.layers import Conv2D,Dense,MaxPooling2D,Flatten,BatchNormalization,Dropout
from IPython.display import display
from PIL import Image
import tensorflow as tf
# NOTE(review): standalone `keras` and `tensorflow.keras` are mixed here
# (TensorBoard import vs. keras.* usage) -- verify both resolve to the same
# backend on the target installation.
# Fix the NumPy RNG so shuffling/augmentation is repeatable.
np.random.seed(1)
with tf.device('/gpu:0'):
    # Plain generator: no augmentation or rescaling is applied.
    keras_data=keras.preprocessing.image.ImageDataGenerator()
    path1="D:\\tiroida\\celule\\leucemie_train"
    date1 = keras_data.flow_from_directory(path1, target_size = (450, 450),batch_size=32, classes = ["normal","leucemie"], class_mode = "binary")
    path2="D:\\tiroida\\celule\\leucemie_test"
    date2 = keras_data.flow_from_directory(path2, target_size = (450, 450),batch_size=100, classes = ["normal","leucemie"], class_mode = "binary")
    # Small CNN for binary classification (normal vs. leukemia cells).
    tfmodel=keras.models.Sequential()
    tfmodel.add(Conv2D(filters=4,kernel_size=(3,3), padding='same',activation="relu",input_shape=(450,450,3)))
    tfmodel.add(MaxPooling2D(pool_size=(2,2)))
    tfmodel.add(Conv2D(filters=8, kernel_size=(3,3), activation="relu",padding='same'))
    tfmodel.add(Conv2D(filters=8, kernel_size=(3,3), activation="relu",padding='same'))
    tfmodel.add(BatchNormalization())
    tfmodel.add(MaxPooling2D(pool_size=(2,2)))
    tfmodel.add(Conv2D(filters=8, kernel_size=(3,3), activation="relu",padding='same'))
    tfmodel.add(Conv2D(filters=16, kernel_size=(3,3), activation="relu",padding='same'))
    tfmodel.add(BatchNormalization())
    tfmodel.add(MaxPooling2D(pool_size=(2,2)))
    tfmodel.add(Flatten())
    tfmodel.add(Dense(16, activation="relu"))
    # Single sigmoid unit matches class_mode="binary" above.
    tfmodel.add(Dense(1, activation="sigmoid"))
    tfmodel.compile(optimizer='Adam',loss="binary_crossentropy", metrics=["accuracy"])
    # NOTE(review): recent Keras names this metric 'val_accuracy', not
    # 'val_acc'; if the name does not match, the checkpoint is never
    # considered "best" and no file is saved -- confirm installed version.
    checkpoint = keras.callbacks.ModelCheckpoint(filepath='leucemie.h5', save_best_only=True,monitor='val_acc')
    # NOTE(review): fit_generator/evaluate_generator are deprecated in
    # TF2-era Keras in favor of fit/evaluate, which accept generators.
    tfmodel.fit_generator(date1,validation_data=date2,epochs=10,steps_per_epoch=100,validation_steps=1,callbacks=[checkpoint])
    # Reload the best checkpoint and report test metrics.
    model=keras.models.load_model('leucemie.h5')
    print(model.evaluate_generator(date2,steps=1))
    # Keep the console window open until the user presses Enter.
    input()
#!/usr/bin/env python
# Replace distutils' metadata class and setup() entry point with the local
# implementations from the `setupfiles` package, so downstream code that
# imports distutils transparently picks up the customized versions.
import distutils
from setupfiles.dist import DistributionMetadata
from setupfiles.setup import setup
__all__ = ["setup"]
# NOTE(review): distutils.dist / distutils.core are not imported explicitly
# here -- presumably the `setupfiles` imports above pull them in as
# submodules; verify, otherwise these lines raise AttributeError.
distutils.dist.DistributionMetadata = DistributionMetadata
distutils.core.setup = setup
| 233 | 61 |
import Gramatica
def testSeparadorDeSilabas(entrada, esperado):
    """Run Gramatica.separarEnSilabas on `entrada` and print an [OK]/[ERROR]
    line comparing the result against `esperado`."""
    def quoted(text):
        return "\"" + text + "\""
    try:
        salida = Gramatica.separarEnSilabas(entrada)
    except Gramatica.NoHayVocal:
        # The word has no vowel, so it cannot be split into syllables.
        print("[ERROR]", "Salida esperada:", quoted(esperado), "|", "Salida obtenida:", "Excepcion: No hay vocal")
        return
    if salida == esperado:
        print("[OK]", "Entrada:", quoted(entrada), "|", "Salida:", quoted(salida))
    else:
        print("[ERROR]", "Salida esperada:", quoted(esperado), "|", "Salida obtenida:", quoted(salida))
# (input, expected syllabification) pairs, exercised in order below.
_CASOS = [
    ("AprEnDer", "A-prEn-Der"),
    ("ÉpiCo", "É-pi-Co"),
    ("PÓDIO", "PÓ-DIO"),
    ("aprender", "a-pren-der"),
    ("tabla", "ta-bla"),
    ("ratón", "ra-tón"),
    ("épico", "é-pi-co"),
    ("brocha", "bro-cha"),  # grupos consonanticos br, cr, dr, gr, fr, kr, tr, bl, cl, gl, fl, kl, pl son inseparables
    ("abrazo", "a-bra-zo"),
    ("submarino", "sub-ma-ri-no"),  # los prefijos pueden o no separarse
    ("perspicacia", "pers-pi-ca-cia"),  # 3 consonantes consecutivas, 2 van a la silaba anterior y 1 a la siguiente
    ("conspirar", "cons-pi-rar"),
    ("obscuro", "obs-cu-ro"),
    ("irreal", "i-rre-al"),  # no se pueden separar las rr
    ("acallar", "a-ca-llar"),  # no se pueden separar las ll
    ("abstracto", "abs-trac-to"),  # 4 consonantes consecutivas, 2 van a la silaba anterior y 2 a la siguiente
    ("rubia", "ru-bia"),  # los diptongos no se separan
    ("labio", "la-bio"),
    ("caigo", "cai-go"),
    ("oigo", "oi-go"),
    ("descafeinado", "des-ca-fei-na-do"),
    ("diurno", "diur-no"),
    ("ruido", "rui-do"),
    ("pódio", "pó-dio"),
    ("aplanar", "a-pla-nar"),
    ("ocre", "o-cre"),
    ("archi", "ar-chi"),
    ("leer", "le-er"),
    ("caos", "ca-os"),
    ("baúl", "ba-úl"),
    ("ambiguo", "am-bi-guo"),
    ("antifaz", "an-ti-faz"),
    ("transplantar", "trans-plan-tar"),
    ("substraer", "subs-tra-er"),
    ("abstraer", "abs-tra-er"),
    ("abstracto", "abs-trac-to"),
    ("pingüino", "pin-güi-no"),
    ("vergüenza", "ver-güen-za"),
    ("bilingüe", "bi-lin-güe"),
    ("baúl ocre", "ba-úl o-cre"),
    ("", ""),
    (" ", " "),
    ("  ", "  "),
    ("k", "k"),
    ("1", "1"),
    ("abstraer abstracto", "abs-tra-er abs-trac-to"),
]
for entrada, esperado in _CASOS:
    testSeparadorDeSilabas(entrada, esperado)
#
# Minify JSON data files in the `/dist` directory.
# Script invoked by the npm postbuild script after building the project with `npm run build`.
#
from os import (
path,
listdir,
fsdecode
)
import json
from datetime import datetime
class JSONMinifier:
    """Minify every ``.json`` file in a directory, rewriting it in place."""

    # NOTE(review): both constants currently resolve to the same directory,
    # so `__main__` below minifies it twice -- confirm whether snapshot data
    # was meant to live in a separate folder.
    DIST_CONSTITUENT_DATA_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..', 'dist', 'assets', 'data'))
    DIST_SNAPSHOT_DATA_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..', 'dist', 'assets', 'data'))

    def minify_json(self, directory):
        """Rewrite each ``*.json`` file under ``directory`` without any
        inter-token whitespace, logging one line per file processed."""
        for file in listdir(directory):
            filename = fsdecode(file)
            if filename.endswith(".json"):
                # r+ lets us read, rewind, and overwrite the same handle;
                # truncate() discards any leftover tail of the longer original.
                with open(path.join(directory, filename), "r+") as f:
                    data = json.loads(f.read())
                    f.seek(0)
                    f.write(json.dumps(data, separators=(',', ':')))
                    f.truncate()
                # Bug fix: the log line previously printed the literal text
                # "(unknown)" instead of the processed file's name.
                print(f"{datetime.now().strftime('%Y/%m/%d %H:%M:%S')} | Minified {filename}")
if __name__ == '__main__':
    # Minify both data directories (currently they point at the same path).
    minifier = JSONMinifier()
    for target_directory in (minifier.DIST_CONSTITUENT_DATA_DIRECTORY,
                             minifier.DIST_SNAPSHOT_DATA_DIRECTORY):
        minifier.minify_json(target_directory)
| 1,201 | 397 |
import subprocess
import platform
from Scripts import plist, utils
class CPUName:
    """Interactive console tool that edits an OpenCore config.plist to set a
    custom CPU name (RestrictEvents' revcpu/revcpuname NVRAM variables) and
    the SMBIOS ProcessorType, with cleanup of any previously-set values."""
    def __init__(self, **kwargs):
        # Console UI helper (headers, prompts, drag-and-drop path checks).
        self.u = utils.Utils("CPU-Name")
        # Currently selected config.plist path and its parsed contents.
        self.plist_path = None
        self.plist_data = {}
        # When True, an emptied NVRAM -> Add UUID dict is removed entirely.
        self.clear_empty = True
        # Local machine info: core count (-1 if unknown) and brand string
        # ("" if unknown), used to offer "use local value" menu options.
        self.detected = self.detect_cores()
        self.cpu_model = self.detect_cpu_model()
    def ensure_path(self, plist_data, path_list, final_type = list):
        """Ensure the nested key chain `path_list` exists in `plist_data`,
        creating intermediate dicts and a `final_type()` leaf as needed.
        Mutates `plist_data` in place and returns it."""
        if not path_list: return plist_data
        last = plist_data
        for index,path in enumerate(path_list):
            if not path in last:
                if index >= len(path_list)-1:
                    last[path] = final_type()
                else:
                    last[path] = {}
            last = last[path]
        return plist_data
    def select_plist(self):
        """Prompt until a loadable config.plist is chosen; store its path and
        parsed data on self and return (path, data).  Returns None on 'M'."""
        while True:
            self.u.head("Select Plist")
            print("")
            print("M. Return To Menu")
            print("Q. Quit")
            print("")
            plist_path = self.u.grab("Please drag and drop your config.plist here: ")
            if not len(plist_path): continue
            elif plist_path.lower() == "m": return
            elif plist_path.lower() == "q": self.u.custom_quit()
            path_checked = self.u.check_path(plist_path)
            if not path_checked: continue
            # Got a valid path here - let's try to load it
            try:
                with open(path_checked,"rb") as f:
                    plist_data = plist.load(f)
                if not isinstance(plist_data,dict):
                    raise Exception("Plist root is not a dictionary")
            except Exception as e:
                self.u.head("Error Loading Plist")
                print("\nCould not load {}:\n\n{}\n\n".format(path_checked,repr(e)))
                self.u.grab("Press [enter] to return...")
                continue
            # Got valid plist data - let's store the vars and return
            self.plist_path = path_checked
            self.plist_data = plist_data
            return (path_checked,plist_data)
    def get_value(self, plist_data, search="revcpuname"):
        """Return (boot-arg value, NVRAM value) for `search`: the boot-args
        live under Apple's NVRAM GUID, the variable under OpenCore's GUID."""
        boot_args = plist_data.get("NVRAM",{}).get("Add",{}).get("7C436110-AB2A-4BBB-A880-FE41995C9F82",{}).get("boot-args","")
        nvram_val = plist_data.get("NVRAM",{}).get("Add",{}).get("4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102",{}).get(search,"")
        boota_val = ""
        for arg in boot_args.split():
            if not arg.startswith(search+"="): continue
            boota_val = arg.split("=")[-1]
            break # Only take the first instance
        return (boota_val,nvram_val)
    def get_cpu_name(self, plist_data):
        """Return (boot-arg, NVRAM) values for revcpuname."""
        return self.get_value(plist_data,"revcpuname")
    def get_rev_cpu(self, plist_data):
        """Return (boot-arg, NVRAM) values for revcpu."""
        return self.get_value(plist_data,"revcpu")
    def get_proc_type(self, plist_data):
        """Return PlatformInfo -> Generic -> ProcessorType (0 if unset)."""
        return plist_data.get("PlatformInfo",{}).get("Generic",{}).get("ProcessorType",0)
    def get_kext(self, plist_data):
        """Scan Kernel -> Add for RestrictEvents.kext; return a
        (found, enabled) tuple of booleans."""
        kext_list = plist_data.get("Kernel",{}).get("Add",[])
        found = enabled = False
        for kext in kext_list:
            if kext.get("ExecutablePath","").lower() == "contents/macos/restrictevents":
                found = True
                if kext.get("Enabled"):
                    enabled = True
                    break
        return (found,enabled)
    def get_new_proc_type(self, plist_data):
        """Menu asking for a new ProcessorType; returns 1537 (<=6 cores),
        3841 (8+ cores), 0 (reset) or None if the user backs out."""
        while True:
            p_type = self.get_proc_type(plist_data)
            p_label = " (8+ Core)" if p_type == 3841 else " (1, 2, 4, or 6 Core)" if p_type == 1537 else " (Must be 0x0601 or 0x0F01 to work)"
            self.u.head("ProcessorType")
            print("")
            print("Current Processor Type: {}{}".format(self.get_hex(p_type),p_label))
            print("")
            print("1. Set to 0x0601 for 1, 2, 4, or 6 Core")
            print("2. Set to 0x0F01 for 8+ Core")
            print("3. Reset to the default 0x00")
            print("")
            if self.detected != -1:
                print("L. Use Local Machine's Value ({:,} Core{} = {})".format(self.detected, "" if self.detected==1 else "s", "0x0601" if self.detected < 8 else "0x0F01"))
            print("M. Return To Menu")
            print("Q. Quit")
            print("")
            proc = self.u.grab("Please select an option: ")
            if not len(proc): continue
            if proc.lower() == "m": return None
            elif proc.lower() == "q": self.u.custom_quit()
            elif proc == "1": return 1537
            elif proc == "2": return 3841
            elif proc == "3": return 0
            elif self.detected != -1 and proc.lower() == "l": return 1537 if self.detected < 8 else 3841
    def detect_cpu_model(self):
        """Best-effort CPU brand string for the local machine ("" if it
        cannot be determined).  The broad except is deliberate: any probe
        failure simply disables the 'use local value' menu option."""
        try:
            _platform = platform.system().lower()
            if _platform == "darwin":
                return subprocess.check_output(["sysctl", "-n", "machdep.cpu.brand_string"]).decode().strip()
            elif _platform == "windows":
                return subprocess.check_output(["wmic", "cpu", "get", "Name"]).decode().split("\n")[1].strip()
            elif _platform == "linux":
                data = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode().split("\n")
                for line in data:
                    if line.startswith("model name"):
                        return ": ".join([x for x in line.split(": ")[1:]])
        except:
            pass
        return ""
    def detect_cores(self):
        """Best-effort physical core count for the local machine (-1 if it
        cannot be determined); same deliberate broad except as above."""
        try:
            _platform = platform.system().lower()
            if _platform == "darwin":
                return int(subprocess.check_output(["sysctl", "-a", "machdep.cpu.core_count"]).decode().split(":")[1].strip())
            elif _platform == "windows":
                return int(subprocess.check_output(["wmic", "cpu", "get", "NumberOfCores"]).decode().split("\n")[1].strip())
            elif _platform == "linux":
                data = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode().split("\n")
                for line in data:
                    if line.startswith("cpu cores"):
                        return int(line.split(":")[1].strip())
        except:
            pass
        return -1
    def set_values(self, revcpu, cpuname, proctype, plist_data):
        """Clear prior values then write revcpu/revcpuname into OpenCore's
        NVRAM Add section and ProcessorType into PlatformInfo; returns the
        mutated plist_data."""
        # Clear any prior values and ensure pathing
        plist_data = self.clear_values(plist_data)
        plist_data = self.ensure_path(plist_data,["NVRAM","Add","4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"],dict)
        plist_data = self.ensure_path(plist_data,["PlatformInfo","Generic","ProcessorType"],int)
        # Set our new values
        plist_data["NVRAM"]["Add"]["4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"]["revcpu"] = revcpu
        plist_data["NVRAM"]["Add"]["4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"]["revcpuname"] = cpuname
        plist_data["PlatformInfo"]["Generic"]["ProcessorType"] = proctype
        return plist_data
    def clear_values(self, plist_data):
        """Remove revcpu/revcpuname from boot-args and NVRAM Add, register
        them (and boot-args) under NVRAM Delete so stale firmware values
        cannot stick, and reset ProcessorType to 0; returns plist_data."""
        # Ensure Delete values exist so we can prevent old values from sticking
        plist_data = self.ensure_path(plist_data,["NVRAM","Delete","4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"],list)
        plist_data = self.ensure_path(plist_data,["NVRAM","Delete","7C436110-AB2A-4BBB-A880-FE41995C9F82"],list)
        # Gather our values
        boot_args = plist_data["NVRAM"].get("Add",{}).get("7C436110-AB2A-4BBB-A880-FE41995C9F82",{}).get("boot-args","")
        nv_a_val = plist_data["NVRAM"].get("Add",{}).get("4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102",{})
        nv_d_val = plist_data["NVRAM"]["Delete"]["4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"]
        # Walk boot args to see if we use any revcpu* values and remove them
        if any(x in boot_args for x in ("revcpu=","revcpuname=")):
            boot_args = " ".join([x for x in boot_args.split() if not x.startswith(("revcpu=","revcpuname="))])
            plist_data["NVRAM"]["Add"]["7C436110-AB2A-4BBB-A880-FE41995C9F82"]["boot-args"] = boot_args
        # Remove them from the NVRAM -> Add section
        if any(x in nv_a_val for x in ("revcpu","revcpuname")):
            for x in ("revcpu","revcpuname"):
                nv_a_val.pop(x,None)
            if nv_a_val:
                plist_data["NVRAM"]["Add"]["4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"] = nv_a_val
            elif self.clear_empty:
                # Clean out the UUID if empty
                plist_data["NVRAM"]["Add"].pop("4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102",None)
        # Ensure they remain in the NVRAM -> Delete section to prevent stuck values
        for x in ("revcpu","revcpuname"):
            if x in nv_d_val: continue
            nv_d_val.append(x)
        # Make sure we override boot-args to avoid any stickage too
        if not "boot-args" in plist_data["NVRAM"]["Delete"]["7C436110-AB2A-4BBB-A880-FE41995C9F82"]:
            plist_data["NVRAM"]["Delete"]["7C436110-AB2A-4BBB-A880-FE41995C9F82"].append("boot-args")
        plist_data["NVRAM"]["Delete"]["4D1FDA02-38C7-4A6A-9CC6-4BCCA8B30102"] = nv_d_val
        if plist_data.get("PlatformInfo",{}).get("Generic",{}).get("ProcessorType",0) != 0:
            plist_data["PlatformInfo"]["Generic"]["ProcessorType"] = 0
        return plist_data
    def get_hex(self, value, pad_to=2):
        """Format an int as 0x-prefixed uppercase hex, zero-padded to an
        even number of digits; returns "" for non-int input."""
        if not isinstance(value,int): return ""
        h = hex(value)[2:]
        return "0x"+("0"*(len(h)%pad_to))+h.upper()
    def get_new_cpu_name(self, plist_data):
        """Prompt for a new CPU name; returns the entered string, the local
        machine's brand string for 'L', or None if the user backs out."""
        while True:
            cpu_nam = self.get_cpu_name(plist_data)
            self.u.head("New CPU Name")
            print("")
            print("Current CPU Name: {}".format(cpu_nam[0]+" (boot-arg)" if cpu_nam[0] else cpu_nam[1] if cpu_nam[1] else "Not Set"))
            print("")
            if self.cpu_model:
                print("L. Use Local Machine's Value ({})".format(self.cpu_model))
            print("M. Return To Menu")
            print("Q. Quit")
            print("")
            name = self.u.grab("Please enter a new CPU name: ")
            if not len(name): continue
            elif name.lower() == "m": return
            elif name.lower() == "q": self.u.custom_quit()
            elif self.cpu_model and name.lower() == "l": return self.cpu_model
            return name
    def save_plist(self):
        """Write self.plist_data back to self.plist_path; returns True on
        success, False (after showing the error) on failure."""
        try:
            with open(self.plist_path,"wb") as f:
                plist.dump(self.plist_data,f)
        except Exception as e:
            self.u.head("Error Saving Plist")
            print("\nCould not save {}:\n\n{}\n\n".format(self.plist_path,repr(e)))
            self.u.grab("Press [enter] to return...")
            return False
        return True
    def main(self):
        """Top-level menu loop: show current state, dispatch to the
        name/type/clear/select actions, and save after each change."""
        while True:
            cpu_rev = self.get_rev_cpu(self.plist_data)
            cpu_nam = self.get_cpu_name(self.plist_data)
            p_type = self.get_proc_type(self.plist_data)
            p_label = " (8+ Core)" if p_type == 3841 else " (1, 2, 4, or 6 Core)" if p_type == 1537 else " (Must be 0x0601 or 0x0F01 to work!)"
            f,e = self.get_kext(self.plist_data)
            k_label = "Not Found (Must be present and Enabled to work!)" if not f else "Disabled (Must be Enabled to work!)" if not e else "Found and Enabled"
            self.u.head()
            print("")
            print("Selected Plist: {}".format(self.plist_path))
            print("Rev CPU Name:   {}".format("" if not self.plist_path else cpu_nam[0]+" (boot-arg)" if cpu_nam[0] else cpu_nam[1] if cpu_nam[1] else "Not Set"))
            print("Rev CPU:        {}".format("" if not self.plist_path else cpu_rev[0]+" (boot-arg)" if cpu_rev[0] else cpu_rev[1] if cpu_rev[1] else "Not Set"))
            print("Processor Type: {}{}".format("" if not self.plist_path else self.get_hex(p_type),"" if not self.plist_path else p_label))
            print("RestrictEvents: {}".format("" if not self.plist_path else k_label))
            print("")
            print("Note: Changes are saved to the target plist immediately.")
            print("      Make sure you keep a backup!")
            print("")
            print("1. Change CPU Name")
            print("2. Change Processor Type")
            print("3. Clear CPU Name, Rev CPU, and Processor Type")
            print("4. Select Plist")
            print("")
            print("Q. Quit")
            print("")
            menu = self.u.grab("Please select an option:  ")
            if not len(menu): continue
            elif menu.lower() == "q": self.u.custom_quit()
            if menu in ("1","2","3") and not self.plist_path:
                self.select_plist()
                if not self.plist_path: continue
                p_type = self.get_proc_type(self.plist_data) # Gather new proc type after loading
            if menu == "1":
                # A valid ProcessorType is required before a name can work.
                if not p_type in (3841,1537):
                    new_type = self.get_new_proc_type(self.plist_data)
                    if new_type is None: continue
                    p_type = new_type
                new_name = self.get_new_cpu_name(self.plist_data)
                if new_name is None: continue
                self.plist_data = self.set_values(1,new_name,p_type,self.plist_data)
                self.save_plist()
            elif menu == "2":
                new_type = self.get_new_proc_type(self.plist_data)
                if new_type is None: continue
                self.plist_data = self.ensure_path(self.plist_data,["PlatformInfo","Generic","ProcessorType"],int)
                self.plist_data["PlatformInfo"]["Generic"]["ProcessorType"] = new_type
                self.save_plist()
            elif menu == "3":
                self.plist_data = self.clear_values(self.plist_data)
                self.save_plist()
            elif menu == "4":
                self.select_plist()
# Launch the interactive menu.
CPUName().main()
| 13,751 | 4,705 |
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2016, Yung-Yu Chen <yyc@solvcon.net>
# BSD 3-Clause License, see COPYING
import os
import numpy as np
import solvcon as sc
class Probe(object):
    """
    A sampling point in the mesh; records solution values over time.
    """
    def __init__(self, *args, **kw):
        self.speclst = kw.pop('speclst')
        self.name = kw.pop('name', None)
        # Positional arguments are the point coordinates.
        self.crd = np.array(args, dtype='float64')
        self.pcl = -1  # owning cell index; -1 until located
        self.vals = []  # one [time, val0, val1, ...] record per sample
    def __str__(self):
        coord_text = ','.join('%g' % c for c in self.crd)
        return 'Pt/%s#%d(%s)%d' % (self.name, self.pcl, coord_text, len(self.vals))
    def locate_cell(self, svr):
        # Only the owning cell index is retained; the other returned
        # face/cell indices are not needed here.
        located = svr.alg.locate_point(self.crd)
        self.pcl = located[0]
    def __call__(self, svr, time):
        ngstcell = svr.ngstcell
        record = [time]
        for spec in self.speclst:
            arr = None
            if isinstance(spec, str):
                arr = svr.der[spec] # FIXME: translate to qty
            elif isinstance(spec, int):
                if 0 <= spec < svr.neq:
                    arr = svr.sol.so0n.F[:,spec]
                elif spec < 0 and -1-spec < svr.neq:
                    arr = svr.sol.so0c.F[:,-1-spec]
            if arr is None:
                raise IndexError('spec %s incorrect' % str(spec))
            record.append(arr[ngstcell+self.pcl])
        self.vals.append(record)
class ProbeAnchor(sc.MeshAnchor):
    """
    Anchor that builds probes from coordinate specs and samples the solver.
    """
    def __init__(self, svr, **kw):
        spec_list = kw.pop('speclst')
        coords = kw.pop('coords')
        # Each entry is (name, coord0, coord1, ...).
        self.points = [
            Probe(*entry[1:], speclst=spec_list, name=entry[0])
            for entry in coords
        ]
        super(ProbeAnchor, self).__init__(svr, **kw)
    def preloop(self):
        # Bind every probe to its owning cell, then record the initial state.
        for point in self.points:
            point.locate_cell(self.svr)
        for point in self.points:
            point(self.svr, self.svr.time)
    def postfull(self):
        # Sample after every full time march.
        for point in self.points:
            point(self.svr, self.svr.time)
class ProbeHook(sc.MeshHook):
    """
    Point probe: periodically collects probe objects from the solver(s) and
    saves each probe's sampled values to a ``.npy`` file at the end.
    """
    def __init__(self, cse, **kw):
        self.name = kw.pop('name', 'ppank')
        super(ProbeHook, self).__init__(cse, **kw)
        # Remaining keywords are forwarded to the ProbeAnchor.
        self.ankkw = kw
        self.points = None
    def drop_anchor(self, svr):
        """Deliver a ProbeAnchor carrying this hook's name to the solver."""
        ankkw = self.ankkw.copy()
        ankkw['name'] = self.name
        self._deliver_anchor(svr, ProbeAnchor, ankkw)
    def _collect(self):
        """Gather located probes from all worker blocks (parallel run) or
        from the local solver (serial run) into self.points."""
        cse = self.cse
        if cse.is_parallel:
            dom = cse.solver.domainobj
            dealer = cse.solver.dealer
            allpoints = list()
            for iblk in range(dom.nblk):
                dealer[iblk].cmd.pullank(self.name, 'points', with_worker=True)
                allpoints.append(dealer[iblk].recv())
            npt = len(allpoints[0])
            points = [None]*npt
            for rpoints in allpoints:
                # Fill each still-empty slot from the first block that
                # actually located the point (pcl >= 0).
                # Bug fix: the previous while-loop only advanced its index
                # inside the conditional ("== None" identity test aside),
                # so it spun forever once a slot was already filled.
                for ipt in range(npt):
                    if points[ipt] is None and rpoints[ipt].pcl >= 0:
                        points[ipt] = rpoints[ipt]
        else:
            svr = self.cse.solver.solverobj
            points = [pt for pt in svr.runanchors[self.name].points
                      if pt.pcl >= 0]
        self.points = points
    def postmarch(self):
        """Collect probe data every self.psteps steps; returns whether a
        collection happened this step."""
        psteps = self.psteps
        istep = self.cse.execution.step_current
        if istep % psteps != 0: return False
        self._collect()
        return True
    def postloop(self):
        """Dump each probe's accumulated samples to <basefn>_pt_<hook>_<probe>.npy."""
        for point in self.points:
            ptfn = '%s_pt_%s_%s.npy' % (
                self.cse.io.basefn, self.name, point.name)
            ptfn = os.path.join(self.cse.io.basedir, ptfn)
            np.save(ptfn, np.array(point.vals, dtype='float64'))
# vim: set ff=unix fenc=utf8 ft=python nobomb et sw=4 ts=4 tw=79:
| 3,853 | 1,368 |
class BuyingCheap:
    def thirdBestPrice(self, prices):
        """Return the third-lowest distinct price, or -1 when fewer than
        three distinct prices exist."""
        distinct = sorted(set(prices))
        if len(distinct) < 3:
            return -1
        return distinct[2]
| 134 | 48 |
""" COCO dataset (quick and dirty)
Hacked together by Ross Wightman
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import os
import cv2
import random
import torch
import numpy as np
from PIL import Image
from pycocotools.coco import COCO
class CocoDetection(data.Dataset):
    """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.
    Args:
        root (string): Root directory where images are downloaded to.
        ann_file (string): Path to json annotation file.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.ToTensor``
    """
    def __init__(self, root, ann_file, transform=None):
        super(CocoDetection, self).__init__()
        # NOTE(review): torch._six was removed in newer PyTorch releases;
        # this line requires an older torch version -- confirm the pin.
        if isinstance(root, torch._six.string_classes):
            root = os.path.expanduser(root)
        self.root = root
        self.transform = transform
        self.yxyx = True  # expected for TF model, most PT are xyxy
        self.include_masks = False
        self.include_bboxes_ignore = False
        # "image_info" annotation files (test sets) carry no box annotations.
        self.has_annotations = 'image_info' not in ann_file
        self.coco = None
        self.cat_ids = []
        self.cat_to_label = dict()
        self.img_ids = []
        self.img_ids_invalid = []
        self.img_infos = []
        self._load_annotations(ann_file)
    def _load_annotations(self, ann_file):
        """Load the COCO annotation file and partition image ids into valid
        (annotated, at least 32px on the short side) and invalid lists."""
        assert self.coco is None
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.getCatIds()
        img_ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        for img_id in sorted(self.coco.imgs.keys()):
            info = self.coco.loadImgs([img_id])[0]
            valid_annotation = not self.has_annotations or img_id in img_ids_with_ann
            if valid_annotation and min(info['width'], info['height']) >= 32:
                self.img_ids.append(img_id)
                self.img_infos.append(info)
            else:
                self.img_ids_invalid.append(img_id)
    def _parse_img_ann(self, img_id, img_info):
        """Build the target dict (boxes, classes, image size) for one image,
        converting COCO xywh boxes to yxyx or xyxy corner format."""
        ann_ids = self.coco.getAnnIds(imgIds=[img_id])
        ann_info = self.coco.loadAnns(ann_ids)
        bboxes = []
        bboxes_ignore = []
        cls = []
        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            if self.include_masks and ann['area'] <= 0:
                continue
            if w < 1 or h < 1:
                continue
            # To subtract 1 or not, TF doesn't appear to do this so will keep it out for now.
            if self.yxyx:
                #bbox = [y1, x1, y1 + h - 1, x1 + w - 1]
                bbox = [y1, x1, y1 + h, x1 + w]
            else:
                #bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
                bbox = [x1, y1, x1 + w, y1 + h]
            # Crowd regions are either routed to the ignore list or dropped.
            if ann.get('iscrowd', False):
                if self.include_bboxes_ignore:
                    bboxes_ignore.append(bbox)
            else:
                bboxes.append(bbox)
                cls.append(self.cat_to_label[ann['category_id']] if self.cat_to_label else ann['category_id'])
        if bboxes:
            bboxes = np.array(bboxes, dtype=np.float32)
            cls = np.array(cls, dtype=np.int64)
        else:
            bboxes = np.zeros((0, 4), dtype=np.float32)
            cls = np.array([], dtype=np.int64)
        if self.include_bboxes_ignore:
            if bboxes_ignore:
                bboxes_ignore = np.array(bboxes_ignore, dtype=np.float32)
            else:
                bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        ann = dict(img_id=img_id, bbox=bboxes, cls=cls, img_size=(img_info['width'], img_info['height']))
        if self.include_bboxes_ignore:
            ann['bbox_ignore'] = bboxes_ignore
        return ann
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, annotations (target)).
        """
        img_id = self.img_ids[index]
        img_info = self.img_infos[index]
        if self.has_annotations:
            ann = self._parse_img_ann(img_id, img_info)
        else:
            # Test-time: only the id and size are available.
            ann = dict(img_id=img_id, img_size=(img_info['width'], img_info['height']))
        path = img_info['file_name']
        img = Image.open(os.path.join(self.root, path)).convert('RGB')
        if self.transform is not None:
            img, ann = self.transform(img, ann)
        return img, ann
    def __len__(self):
        return len(self.img_ids)
class Custom_Dataset(data.Dataset):
    """Detection dataset backed by a DataFrame of (image_id, x, y, w, h)
    rows, with optional CutMix augmentation at training time."""
    def __init__(self, root, data, image_ids, transform=None, test=False):
        self.root = root            # directory containing <image_id>.jpg files
        self.data = data            # DataFrame with image_id/x/y/w/h columns
        self.image_ids = image_ids  # array of image ids to index into
        self.transform = transform  # albumentations-style transform (image/bboxes/labels)
        self.test = test            # when True, augmentation is disabled
    def _load_data(self, index):
        """Load one image (RGB float32 in [0,1]) and its boxes converted
        from xywh to xyxy corners."""
        image_id = self.image_ids[index]
        image = cv2.imread(f'{self.root}/{image_id}.jpg', cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image /= 255.0
        record = self.data[self.data['image_id'] == image_id]
        boxes = record[['x', 'y', 'w', 'h']].values
        # xywh -> xyxy
        boxes[:, 2] = boxes[:, 0] + boxes[:, 2]
        boxes[:, 3] = boxes[:, 1] + boxes[:, 3]
        return image, boxes
    def _load_cutmix_data(self, index, imgsize=1024):
        """CutMix: stitch 4 images (the indexed one plus 3 random ones) into
        one imgsize x imgsize mosaic around a random center, shifting and
        clipping their boxes accordingly."""
        w, h = imgsize, imgsize
        s = imgsize // 2
        # Random mosaic center, kept within the middle half of the canvas.
        xc, yc = [int(random.uniform(imgsize * .25, imgsize * .75)) for _ in range(2)]
        indexes = [index] + [random.randint(0, self.image_ids.shape[0] - 1) for _ in range(3)]
        result_image = np.full((imgsize, imgsize, 3), 1, dtype=np.float32)
        result_boxes = []
        for i, index in enumerate(indexes):
            image, boxes = self._load_data(index)
            # (x1a..y2a) = destination rect on the mosaic,
            # (x1b..y2b) = source rect cropped from the loaded image.
            if i == 0:
                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc
                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h
            elif i == 1:  # top right
                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
            elif i == 2:  # bottom left
                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
            elif i == 3:  # bottom right
                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
            result_image[y1a:y2a, x1a:x2a] = image[y1b:y2b, x1b:x2b]
            # Shift boxes by the destination/source offset.
            padw = x1a - x1b
            padh = y1a - y1b
            boxes[:, 0] += padw
            boxes[:, 1] += padh
            boxes[:, 2] += padw
            boxes[:, 3] += padh
            result_boxes.append(boxes)
        result_boxes = np.concatenate(result_boxes, 0)
        # Clip to the mosaic canvas and drop degenerate (zero-area) boxes.
        np.clip(result_boxes[:, 0:], 0, 2 * s, out=result_boxes[:, 0:])
        result_boxes = result_boxes.astype(np.int32)
        result_boxes = result_boxes[np.where((result_boxes[:, 2] - result_boxes[:, 0]) * (result_boxes[:, 3] - result_boxes[:, 1]) > 0)]
        return result_image, result_boxes
    def __getitem__(self, index: int):
        image_id = self.image_ids[index]
        # NOTE(review): the elif and else branches are identical -- one of
        # them was presumably meant to be a different augmentation (e.g.
        # mixup).  The extra random.random() call still advances the RNG, so
        # simplifying this would change the sampling sequence; left as-is.
        if self.test or random.random() > 0.35:
            image, boxes = self._load_data(index)
        elif random.random() > 0.5:
            image, boxes = self._load_cutmix_data(index)
        else:
            image, boxes = self._load_cutmix_data(index)
        # Single-class problem: every box gets label 1.
        labels = torch.ones((boxes.shape[0]), dtype=torch.int64)
        target = {}
        target['boxes'] = boxes
        target['labels'] = labels
        target['image_id'] = torch.tensor(index)
        if self.transform:
            # Retry augmentation up to 10 times until at least one box survives.
            for i in range(10):
                sample = self.transform(**{
                    'image': image,
                    'bboxes': target['boxes'],
                    'labels': labels
                })
                if len(sample['bboxes']) > 0:
                    image = sample['image']
                    target['boxes'] = torch.stack(tuple(map(torch.tensor, zip(*sample['bboxes'])))).permute(1, 0)
                    # xyxy -> yxyx, as expected by the downstream model.
                    target['boxes'][:, [0, 1, 2, 3]] = target['boxes'][:, [1, 0, 3, 2]]
                    break
        return image, target, image_id
    def __len__(self) -> int:
        return self.image_ids.shape[0]
'''OpenGL extension EXT.sRGB_write_control
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.sRGB_write_control to provide a more
Python-friendly API
Overview (from the spec)
This extension's intent is to expose new functionality which allows an
application the ability to decide if the conversion from linear space to
sRGB is necessary by enabling or disabling this conversion at framebuffer
write or blending time. An application which passes non-linear vector data
to a shader may not want the color conversion occurring, and by disabling
conversion the application can be simplified, sometimes in very significant
and more optimal ways.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/sRGB_write_control.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.sRGB_write_control import *
from OpenGL.raw.GLES2.EXT.sRGB_write_control import _EXTENSION_NAME
def glInitSrgbWriteControlEXT():
    """Report whether the EXT_sRGB_write_control extension is available."""
    from OpenGL import extensions
    available = extensions.hasGLExtension(_EXTENSION_NAME)
    return available
### END AUTOGENERATED SECTION | 1,292 | 350 |
from imagenet.models.biggan import BigGAN
from imagenet.models.u2net import U2NET
from imagenet.models.cgn import CGN
from imagenet.models.classifier_ensemble import InvariantEnsemble
# Bug fix: __all__ must list the *names* of public symbols as strings;
# putting the class objects themselves here makes `from ... import *`
# raise TypeError ("Item in __all__ must be str").
__all__ = [
    "CGN",
    "InvariantEnsemble",
    "BigGAN",
    "U2NET",
]
| 241 | 88 |
# Do not edit this file directly.
# It was auto-generated by: code/programs/reflexivity/reflexive_refresh
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def osdialog():
    """Fetch the pinned osdialog source archive for Bazel builds.

    Auto-generated by reflexive_refresh -- regenerate instead of hand-editing;
    the sha256/strip_prefix must match the commit in the URL.
    """
    http_archive(
        name="osdialog" ,
        build_file="//bazel/deps/osdialog:build.BUILD" ,
        sha256="3fc6dabcf1fcfdca5fd87f2474a113e17212da4211b3fa3deecd263a6a59dc26" ,
        strip_prefix="osdialog-a3ca84070c620b186f475ea17b86e65efab5ce57" ,
        urls = [
            "https://github.com/Unilang/osdialog/archive/a3ca84070c620b186f475ea17b86e65efab5ce57.tar.gz",
        ],
    )
| 595 | 283 |
from yaml import safe_load
from .blocklist import Blocklist
class Configuration:
    """Application settings parsed from a YAML configuration file."""
    def __init__(self, location):
        self.__location = location
        with open(location) as config_file:
            data = safe_load(config_file)
            self.repo_path = data["repository"]["path"]
            self.dnsmasq_path = data["dnsmasq"]["path"]
            # One Blocklist object per configured list entry.
            self.blocklists = [
                Blocklist(entry["name"], entry["url"])
                for entry in data["blocklists"]
            ]
        # TODO handle the configuration here
""" Create meta data file 'metadata.yaml' for :class:`~pySPACE.resources.dataset_defs.feature_vector.FeatureVectorDataset`
Used for external files, which can not be read directly in pySPACE.
Eg. csv files without names.
To be called in the dataset directory.
"""
def main(md_file):
    """ Interactively collect dataset properties and write them to `md_file`.

    Prompts for file name, storage format, csv delimiter, ignored
    rows/columns and the label column, then generates and writes the
    metadata.yaml content.
    """
    # Request all necessary data from user
    data={}
    msg = "Please enter the name of the file. --> "
    data['file_name'] = get_user_input(msg)
    msg = "Please enter the storage_format of the data.\n "
    msg += "one of arff, csv (csv with header), csvUnnamed (csv without header)--> "
    data['format'] = get_user_input(msg)
    if data['format'] != 'arff':
        # csv variants need a delimiter; retry until a valid one is given.
        # NOTE(review): the collected delimiter is never written into the
        # metadata by generate_meta_data -- verify whether that is intended.
        while True:
            msg = "Please enter csv delimiter/separator. \n"
            msg += "(e.g. ',' , ' ' , ';' or '\t' for a tab, default:',')-->"
            data['delimiter'] = get_user_input(msg)
            if check_delimiter(data):
                break
    msg = "Please enter all rows that can be ignored, separated by comma or range.\n "
    msg += "eg. [1, 2, 3] or [1-3] --> "
    data['rows'] = get_numerical_user_input(msg)
    msg = "Please enter all columns that can be ignored, separated by comma or range.\n"
    msg += "The first row gets number 1."
    msg += "eg. [1, 2, 3] or [1-3] --> "
    data['columns'] = get_numerical_user_input(msg)
    msg = 'Please enter the column that contains the label. -1 for last column \n --> '
    data['label'] = get_user_input(msg)
    meta_data = generate_meta_data(data)
    write_md_file(meta_data, md_file)
    info_string = """\nMeta data file %s written. \n"""%md_file
    give_info(info_string)
def get_numerical_user_input(msg):
    """ Prompt the user, split the reply on ',' and expand '-' ranges. """
    raw_reply = raw_input(msg)
    tokens = raw_reply.replace(' ', '').split(',')
    return parse_list(tokens)
def get_user_input(msg):
    """ Prompt the user with `msg` and return the raw reply. """
    reply = raw_input(msg)
    return reply
def parse_list(input_list):
    """ Replace range tokens by explicit numbers.

    Accepts a list of ints and/or strings such as ['1', '2', '5-7', '-1']
    and returns a flat list of ints, with 'low-high' ranges expanded
    inclusively.  Empty strings and '0' are skipped (zero is not an
    accepted index); a leading '-' denotes a negative number, not a range.
    """
    info = []
    for index in input_list:
        if isinstance(index, int):
            # Bug fix: ints were previously appended twice (once by the
            # `type(...) == int` branch, again by `not type(...) == str`).
            info.append(index)
            continue
        token = str(index)
        # zero is not an accepted index
        if token == '0' or token == '':
            continue
        if '-' in token:
            parts = token.split('-')
            if parts[0] == '':
                # a bare negative number such as '-1'
                info.append(int(token))
                continue
            low = int(parts[0])
            high = int(parts[1])
            # Bug fix: the original did `info = info.extend(...)`, which
            # rebinds info to None and crashed on the next iteration.
            info.extend(range(low, high + 1))
        else:
            info.append(int(token))
    return info
def check_delimiter(data):
    """ Validate that data['delimiter'] is a single character.

    An empty entry is interpreted as a space (spaces were stripped from the
    prompt reply) and restored in place.  Returns True when valid; warns and
    returns None otherwise so the caller re-prompts.
    """
    delimiter = data["delimiter"]
    if not delimiter:
        # add the deleted spaces
        data["delimiter"] = ' '
        return True
    if len(delimiter) == 1:
        # tabulator is included here
        return True
    import warnings
    warnings.warn('To long delimiter. Only 1 sign allowed. Please try again.')
def generate_meta_data(data):
    """Render the collected answers as a metadata.yaml string.

    Empty answers fall back to a per-field default; unknown keys in *data*
    (e.g. 'delimiter') produce no output line.
    """
    meta_data = "author: " + os.environ['USER'] + '\n' + \
                "date: " + time.strftime("%Y%m%d") + '\n' + \
                "type: feature_vector" + "\n"
    # fallback value used when the user left the answer blank
    defaults = {
        'file_name': "file_name.csv",
        'format': "csv",
        'rows': "[]",
        'columns': "[]",
        'label': str(-1),
    }
    for key, value in data.items():
        filled = str(value) if value != '' else defaults.get(key)
        if key == 'file_name':
            meta_data += "file_name: " + filled + "\n"
        elif key == 'format':
            meta_data += "storage_format: [" + filled + ', real]' + "\n"
        elif key == 'rows':
            meta_data += "ignored_rows: " + filled + "\n"
        elif key == 'columns':
            meta_data += "ignored_columns: " + filled + "\n"
        elif key == 'label':
            meta_data += "label_column: " + filled + "\n"
    return meta_data
def write_md_file(meta_data, md_file):
    """Write the metadata string to *md_file*, overwriting any existing file."""
    # 'with' guarantees the handle is closed even if the write raises
    with open(md_file, "w") as meta_data_file:
        meta_data_file.write(meta_data)
def give_info(msg):
    """Print an informational message to stdout."""
    # parenthesized form works under both Python 2 and Python 3
    # (the original 'print msg' statement is a syntax error on Python 3)
    print(msg)
import os, time, sys
if __name__ == "__main__":
    # Entry point: create metadata.yaml, asking for consent before overwriting.
    info_string = "\nRunning meta data creator ... \n"
    give_info(info_string)
    md_file = "metadata.yaml"
    if not os.path.isfile(md_file):
        main(md_file)
    else:
        msg = "'metadata.yaml' already exists! \n"
        give_info(msg)
        # NOTE(review): raw_input is Python 2 only
        yes_no = raw_input("Overwrite? y/n: ")
        if yes_no == "y":
            main(md_file)
        else:
            msg = "Exiting ... \n"
            give_info(msg)
            sys.exit(0)
| 5,532 | 1,749 |
""" Testing Abstract LUT model
"""
import unittest
import os
import shutil
import tempfile
from PyOpenColorIO.Constants import INTERP_LINEAR, INTERP_TETRAHEDRAL
from utils import lut_presets as presets
from utils.lut_presets import PresetException, OUT_BITDEPTH
import utils.abstract_lut_helper as alh
from utils.colorspaces import REC709, SGAMUTSLOG, ALEXALOGCV3
from utils.csp_helper import CSP_HELPER
from utils.cube_helper import CUBE_HELPER
from utils.threedl_helper import THREEDL_HELPER, SHAPER, MESH
from utils.spi_helper import SPI_HELPER
from utils.ascii_helper import ASCII_HELPER, AsciiHelperException
from utils.clcc_helper import CLCC_HELPER
from utils.json_helper import JSON_HELPER
from utils.ocio_helper import create_ocio_processor
from utils.lut_utils import get_input_range
DISPLAY = False
class AbstractLUTTest(unittest.TestCase):
    """ Test export of different type of LUTs

    Each test writes LUT files into a temp directory via the format helpers
    and, where the format is OCIO-readable, re-loads them through an OCIO
    processor to check they apply cleanly.
    """
    def setUp(self):
        # Bundled reference LUTs live next to this file; outputs go to a
        # per-suite temp directory (removed again in tearDown).
        test_dir = os.path.join(os.path.dirname(__file__), 'test_files')
        self.tmp_dir = os.path.join(tempfile.gettempdir(), 'testCoPipe')
        if not os.path.exists(self.tmp_dir):
            os.mkdir(self.tmp_dir)
        # create OCIO processor
        lut1d = os.path.join(test_dir, 'CineonToLin_1D.csp')
        lut3d = os.path.join(test_dir, 'saturation.3dl')
        self.processor_1d = create_ocio_processor(lut1d,
                                                  interpolation=INTERP_LINEAR)
        self.processor_3d = create_ocio_processor(lut3d,
                                                  interpolation=INTERP_TETRAHEDRAL)
        # (helper, file extension) pairs for the 1D formats under test
        self.helpers_1d_to_test = [
            (CUBE_HELPER, '.cube'),
            [SPI_HELPER, '.spi1d'],
            (CSP_HELPER, '.csp'),
        ]
        # (helper, file extension, OCIO-readable?) triples for the 3D formats
        self.helpers_3d_to_test = [
            (CUBE_HELPER, '.cube', True),
            [SPI_HELPER, '.spi3d', True],
            (CSP_HELPER, '.csp', True),
            (THREEDL_HELPER, '.3dl', True),
            (CLCC_HELPER, '.cc', False),
            (JSON_HELPER, '.json', False)
        ]
    def test_default_1d_lut(self):
        """ Test a default 1d LUT export
        """
        outlutfiles = []
        for helper, ext in self.helpers_1d_to_test:
            outlutfile = os.path.join(self.tmp_dir, "default_1D" + ext)
            args_1d = helper.get_default_preset()
            helper.write_1d_lut(self.processor_1d.applyRGB, outlutfile,
                                args_1d)
            # create a processor and try it
            proc = create_ocio_processor(outlutfile,
                                         interpolation=INTERP_LINEAR)
            proc.applyRGB([0, 0, 0])
            proc.applyRGB([1, 1, 1])
            outlutfiles.append(outlutfile)
        if DISPLAY:
            import plot_that_lut
            plot_that_lut.plot_that_lut(outlutfiles)
    def test_default_3d_lut(self):
        """ Test a default 3d LUT export
        """
        for helper, ext, ocio_compatible in self.helpers_3d_to_test:
            outlutfile = os.path.join(self.tmp_dir, "default_3D" + ext)
            args_3d = helper.get_default_preset()
            helper.write_3d_lut(self.processor_3d.applyRGB,
                                outlutfile,
                                args_3d)
            if ocio_compatible:
                # create a processor and try it
                proc = create_ocio_processor(outlutfile,
                                             interpolation=INTERP_LINEAR)
                proc.applyRGB([0, 0, 0])
                proc.applyRGB([1, 1, 1])
                if DISPLAY:
                    import plot_that_lut
                    plot_that_lut.plot_that_lut(outlutfile)
    def test_check_attributes(self):
        """ Test preset check function

        Builds a preset attribute by attribute and checks that check_preset
        rejects it until all required attributes are present and well-typed.
        """
        outlutfile = os.path.join(self.tmp_dir, "test.cube")
        default_preset = presets.get_default_preset()
        CUBE_HELPER.check_preset(default_preset)
        # test missing attr
        cust_preset = {}
        self.assertRaises(presets.PresetException,
                          CUBE_HELPER.check_preset, cust_preset)
        for attr in presets.BASIC_ATTRS:
            cust_preset[attr] = default_preset[attr]
        self.assertRaises(presets.PresetException,
                          CUBE_HELPER.check_preset, cust_preset)
        ## test specific attr
        # change type to 1D
        cust_preset[presets.TYPE] = '1D'
        self.assertRaises(presets.PresetException,
                          CUBE_HELPER.check_preset, cust_preset)
        cust_preset[presets.OUT_BITDEPTH] = 12
        CUBE_HELPER.check_preset(cust_preset)
        # try to write a 3D LUT with a 1D preset
        self.assertRaises(alh.AbstractLUTException,
                          CUBE_HELPER.write_3d_lut,
                          self.processor_1d,
                          outlutfile,
                          cust_preset)
        # change type to 3D
        cust_preset[presets.TYPE] = '3D'
        self.assertRaises(presets.PresetException,
                          CUBE_HELPER.check_preset, cust_preset)
        cust_preset[presets.CUBE_SIZE] = 17
        CUBE_HELPER.check_preset(cust_preset)
        # try to write a 1D LUT with a 3D preset
        self.assertRaises(alh.AbstractLUTException,
                          CUBE_HELPER.write_1d_lut,
                          self.processor_1d,
                          outlutfile,
                          cust_preset)
        # # test value type
        # cube size
        cust_preset[presets.CUBE_SIZE] = presets.CUBE_SIZE_MAX_VALUE + 1
        self.assertRaises(presets.PresetException,
                          CUBE_HELPER.check_preset, cust_preset)
        cust_preset[presets.CUBE_SIZE] = default_preset[presets.CUBE_SIZE]
        # range: rejected for non-numeric pairs / wrong arity, accepted for
        # 2-element tuples of numbers
        tests = 'test', ['a', 'a'], [0.0, 0.5, 1.0], 0.1
        for test in tests:
            cust_preset[presets.IN_RANGE] = test
            self.assertRaises(presets.PresetException,
                              CUBE_HELPER.check_preset,
                              cust_preset)
        cust_preset[presets.IN_RANGE] = 0.1, 1
        CUBE_HELPER.check_preset(cust_preset)
        cust_preset[presets.IN_RANGE] = (0.1, 1)
        CUBE_HELPER.check_preset(cust_preset)
    def test_float_luts(self):
        """ Test float LUT transparency

        For each colorspace, writes an encode LUT and a decode LUT and checks
        that chaining them is an identity transform within *delta*.
        """
        helpers_float_to_test = [(CSP_HELPER, '.csp'),
                                 (SPI_HELPER, '.spi1d')]
        colorspace_to_test = [REC709, SGAMUTSLOG, ALEXALOGCV3]
        delta = 0.00001
        for helper, ext in helpers_float_to_test:
            for colorspace in colorspace_to_test:
                # define file name
                name = colorspace.__class__.__name__
                encode_filename = "linTo{0}_1D{1}".format(name, ext)
                decode_filename = "{0}ToLin_1D{1}".format(name, ext)
                encode_filepath = os.path.join(self.tmp_dir, encode_filename)
                decode_filepath = os.path.join(self.tmp_dir, decode_filename)
                # set preset
                args_1d = CSP_HELPER.get_default_preset()
                args_1d[presets.OUT_BITDEPTH] = 16
                decode_min = colorspace.decode_gradation(0)
                decode_max = colorspace.decode_gradation(1)
                args_1d[presets.IN_RANGE] = get_input_range(colorspace,
                                                            "encode",
                                                            10)
                # write encode LUT
                helper.write_2d_lut(colorspace.encode_gradation,
                                    encode_filepath,
                                    args_1d)
                # write decode LUT
                args_1d[presets.IN_RANGE] = get_input_range(colorspace,
                                                            "decode",
                                                            10)
                helper.write_2d_lut(colorspace.decode_gradation,
                                    decode_filepath,
                                    args_1d)
                # test transparency: encode then decode should round-trip
                proc = create_ocio_processor(encode_filepath,
                                             postlutfile=decode_filepath,
                                             interpolation=INTERP_LINEAR)
                test_values = [[decode_min] * 3,
                               [decode_max] * 3,
                               [0] * 3,
                               [0.5] * 3,
                               [1] * 3]
                for rgb in test_values:
                    res = proc.applyRGB(rgb)
                    abs_value = abs(rgb[0] - res[0])
                    self.assertTrue(abs_value < delta,
                                    "{0} transparency test failed : {1:8f} >"
                                    " acceptable delta ({2:8f})".format(name,
                                                                        abs_value,
                                                                        delta)
                                    )
    def test_3dl_preset(self):
        """ Test 3dl preset

        The 3dl format additionally requires TYPE='3D', a shaper and a mesh
        attribute; the preset is built up until check_preset accepts it.
        """
        preset = presets.get_default_preset()
        # test type must be 3D
        self.assertRaises(presets.PresetException,
                          THREEDL_HELPER.check_preset,
                          preset
                          )
        preset[presets.TYPE] = '3D'
        # test shaper attr exists
        self.assertRaises(presets.PresetException,
                          THREEDL_HELPER.check_preset,
                          preset
                          )
        preset[SHAPER] = True
        # test mesh attr exists
        self.assertRaises(presets.PresetException,
                          THREEDL_HELPER.check_preset,
                          preset
                          )
        preset[MESH] = True
        # test preset is ok
        THREEDL_HELPER.check_preset(preset)
        # test ranges are int: writing with the default (float) range must fail
        outlutfile = os.path.join(self.tmp_dir, "test.3dl")
        self.assertRaises(PresetException,
                          THREEDL_HELPER.write_3d_lut,
                          self.processor_3d.applyRGB,
                          outlutfile,
                          preset)
    def test_ascii_lut(self):
        """ Test ascii 1D / 2D export
        """
        colorspace = REC709
        # 2D LUT
        outlutfile = os.path.join(self.tmp_dir, "default_2D.lut")
        preset = ASCII_HELPER.get_default_preset()
        ASCII_HELPER.write_2d_lut(colorspace.decode_gradation,
                                  outlutfile,
                                  preset)
        # 1D LUT
        outlutfile = os.path.join(self.tmp_dir, "default_1D.lut")
        preset = ASCII_HELPER.get_default_preset()
        ASCII_HELPER.write_1d_lut(colorspace.decode_gradation,
                                  outlutfile,
                                  preset)
        # test out bit depth inadequate with output range
        preset[OUT_BITDEPTH] = 12
        self.assertRaises(AsciiHelperException, ASCII_HELPER.write_1d_lut,
                          colorspace.decode_gradation, outlutfile, preset)
    def test_complete_attributes(self):
        """ Test preset complete function

        complete_preset must fill every default attribute; the completed
        preset must then pass check_preset and be usable for writing.
        """
        colorspace = REC709
        outlutfile = os.path.join(self.tmp_dir, "default_ascii_1D.lut")
        default_preset = ASCII_HELPER.get_default_preset()
        cust_preset = {}
        cust_preset = ASCII_HELPER.complete_preset(cust_preset)
        expression = set(default_preset).issubset(set(cust_preset))
        self.assertTrue(expression,
                        ("Something went wrong in preset completion :\n"
                         "Completed preset:\n{0}\nDefault one:\n{1}"
                         ).format(cust_preset, default_preset))
        ASCII_HELPER.check_preset(cust_preset)
        # try to write a float ascii lut without forcing float mode
        cust_preset[presets.IN_RANGE] = [0, 1.0]
        self.assertRaises(PresetException, ASCII_HELPER.write_1d_lut,
                          colorspace.decode_gradation,
                          outlutfile,
                          cust_preset)
        # force float mode
        cust_preset[presets.IS_FLOAT] = True
        ASCII_HELPER.write_1d_lut(colorspace.decode_gradation,
                                  outlutfile,
                                  cust_preset)
    def tearDown(self):
        # Remove test directory
        shutil.rmtree(self.tmp_dir)
if __name__ == '__main__':
    # Run the whole suite when invoked directly.
    unittest.main()
| 12,715 | 3,935 |
import argparse
import sys
import logging
import json
def args_parser():
    """Build the argparse parser for the booktracker command line."""
    arg_parser = argparse.ArgumentParser(
        prog='booktracker',
        description='book update tracker in python')
    add = arg_parser.add_argument
    add('-f', '--urls_file', type=argparse.FileType('r'), help='a file contains book urls, could be a text file list urls or complex json file for url and attributes', required=False)
    add('-l', '--url', type=str, help='a book url to track', required=False)
    add('-o', '--output', type=str, help='directory to store book content', required=True)
    add('--epub', action='store_true', help='generate epub of book', required=False)
    add('--timeout', type=int, help='network request timeout value, default=13s', required=False, default=13)
    add('--author', type=str, help='author of the book', required=False, default='')
    add('--title', type=str, help='title of the book', required=False, default='')
    add('--header', type=str, action='append', help='http request header', required=False, dest='headers')
    add('-v', '--verbose', action='count', help='print debug information', required=False, default=0)
    return arg_parser
def parse_urls_file_txt(urls_file):
    """Parse a plain-text url file: one 'url|author|title|h1,h2' entry per line.

    Missing author/title default to ''; headers beyond the third '|' are
    re-joined and split on ','. Returns a set of (url, author, title, headers)
    tuples.
    """
    entries = set()
    for raw_line in urls_file:
        cleaned = raw_line.strip().replace('\n', '').replace('\r', '')
        fields = cleaned.split('|')
        header_list = []
        if len(fields) > 3:
            header_list = '|'.join(fields[3:]).split(',')
        author = fields[1] if len(fields) > 1 else ''
        title = fields[2] if len(fields) > 2 else ''
        entries.add((fields[0], author, title, tuple(header_list)))
    return entries
def parse_urls_file_json(urls_file):
urls = set()
books = json.load(urls_file)
for book in books:
url = book['url'].strip().replace('\n', '').replace('\r', '')
author = book['author'].strip().replace('\n', '').replace('\r', '') if 'author' in book else ''
title = book['title'].strip().replace('\n', '').replace('\r', '') if 'title' in book else ''
headers = book['headers'] if 'headers' in book else []
logging.debug('url:%s, author:%s, title:%s, headers:%s',
url, author, title, headers)
urls.add((url, author, title, tuple(headers)))
return urls
if __name__ == '__main__':
    parser = args_parser().parse_args()
    if parser.verbose >= 1:
        logging.getLogger('').setLevel(logging.DEBUG)
    # At least one source of book urls is required.
    if parser.urls_file is None and parser.url is None:
        args_parser().print_usage()
        sys.exit()
    urls = set()
    if parser.urls_file:
        # Prefer the json layout; fall back to the plain-text format.
        try:
            urls = parse_urls_file_json(parser.urls_file)
        except:
            logging.exception('urls file:%s is not json try text file', parser.urls_file)
            parser.urls_file.seek(0)
            urls = parse_urls_file_txt(parser.urls_file)
    if parser.url:
        urls.add((parser.url,
                  parser.author,
                  parser.title,
                  tuple(parser.headers) if parser.headers else tuple([]))
                 )
    for url, author, title, headers in sorted(urls):
        try:
            # Bug fix: reset tracker every iteration. Previously an url that
            # matched no site raised NameError on the first pass, or silently
            # reused the previous iteration's tracker on later passes.
            tracker = None
            if url.find('piaotian') > 0 or url.find('ptwxz') > 0:
                from piaotian.book_tracker import Tracker as PiaoTianTracker
                tracker = PiaoTianTracker(url, author, title, parser.output, parser.timeout)
            elif url.find('23us') > 0:
                from dingdian.book_tracker import Tracker as DingDianTracker
                tracker = DingDianTracker(url, author, title, parser.output, parser.timeout)
            elif url.find('youdubook') > 0:
                from youdu.book_tracker import Tracker as YouduTracker
                tracker = YouduTracker(url, author, title, parser.output, parser.timeout)
            elif url.find('shuku') > 0:
                from shuku.book_tracker import Tracker as ShuKuTracker
                tracker = ShuKuTracker(url, author, title, parser.output, parser.timeout)
            elif url.find('uukanshu') > 0:
                from uukanshu.book_tracker import Tracker as UUKanShuTracker
                tracker = UUKanShuTracker(url, author, title, parser.output, parser.timeout)
            if not tracker:
                raise ValueError("tracker not found")
            tracker.headers = list(headers)
            update_count = tracker.refresh()
            print(tracker.title, 'update count:', update_count)
            if parser.epub:
                tracker.gen_epub()
        except:
            logging.exception("update failed:{}".format(url))
| 4,704 | 1,388 |
import math
def rectangle_area(b=None, h=None):
    """Return the area of a rectangle with base *b* and height *h*.

    Prints an error and returns None when either dimension is missing.
    Bug fix: the original tested ``b is None or b is None`` and therefore
    crashed with a TypeError when only *h* was missing.
    """
    if b is None or h is None:
        print("Error wrong parameters")
        return None
    return b * h
def circle_area(radium):
    """Return the area of a circle whose radius is *radium*."""
    area = radium ** 2 * math.pi
    return area
print(circle_area(5))
def intermediate_number(a, b):
    """Return the midpoint (arithmetic mean) of *a* and *b*."""
    midpoint = (a + b) / 2
    return midpoint
print(intermediate_number(-24, 24))
def separate(list_to_separate):
    """Sort *list_to_separate* in place and split it into (evens, odds).

    Both returned lists are ascending because the input is sorted first.
    """
    list_to_separate.sort()
    evens_list = [n for n in list_to_separate if n % 2 == 0]
    odds_list = [n for n in list_to_separate if n % 2 != 0]
    return evens_list, odds_list
# Demo: split a small list into even and odd numbers.
evens, odds = separate([6, 5, 2, 1, 7])
print(evens)
print(odds)
| 667 | 262 |
#!/usr/bin/env python
#
# Copyright (c) 2010, iPlant Collaborative, University of Arizona, Cold Spring Harbor Laboratories, University of Texas at Austin
# This software is licensed under the CC-GNU GPL version 2.0 or later.
# License: http://creativecommons.org/licenses/GPL/2.0/
#
# Author: Seung-jin Kim
# Contact: seungjin@email.arizona.edu
# Twitter: @seungjin
#
import logging
import httplib
import urllib
from urlparse import urlparse
import string
import datetime
from django.http import HttpResponse
from django.template import Context
from django.template.loader import get_template
from django.http import HttpResponse, Http404
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.contrib.auth import logout
from django.http import HttpResponseNotFound
from django.http import HttpResponseForbidden
from django.utils import simplejson
from atmosphere.cloudfront.models import *
def getToken(request, username, password):
    """Authenticate against the configured auth server and cache the token.

    Reads the auth endpoint from the Configs table, performs a GET with the
    credentials in X-Auth-* headers, stores the returned token/service url in
    the Tokens table, and mirrors them into the Django session.
    Always returns True (no failure handling).
    """
    auth_server_url_obj = Configs.objects.filter(key="auth_server_url").order_by('value')[0]
    auth_server_url = auth_server_url_obj.value
    o = urlparse(auth_server_url)
    # NOTE(review): assumes the configured URL always carries an explicit
    # ':port' — an URL without one raises IndexError here; confirm.
    auth_server_url = string.split(o.netloc,":")[0]
    auth_server_port = int(string.split(o.netloc,":")[1])
    auth_server_path = o.path
    method = "GET"
    params = None
    headers = {
        "Content-type" : "application/x-www-form-urlencoded",
        "Accept" : "text/plain",
        "X-Auth-User" : username,
        "X-Auth-Key" : password,
        "User-Agent" : "Atmo/CloudFront"
    }
    conn = httplib.HTTPSConnection(auth_server_url,auth_server_port)
    conn.request(method,auth_server_path,params,headers)
    r1 = conn.getresponse()
    headers = r1.getheaders()
    conn.close()
    api_service_url = None
    api_service_token = None
    # The auth server returns the management URL and token as response headers.
    for header in headers:
        if header[0] == "x-server-management-url" :
            api_service_url = header[1]
        if header[0] == "x-auth-token" :
            api_service_token = header[1]
    # Persist the issued token, then cache it in the session for later calls.
    issued_token = Tokens(username = username, x_auth_token = api_service_token, x_server_management_url = api_service_url, issued_at = datetime.datetime.now())
    issued_token.save()
    request.session['username'] = username
    request.session['token'] = api_service_token
    request.session['api_server'] = api_service_url
    return True
def request(request,method):
    """Proxy an authenticated API call to the cached service endpoint.

    Requires a session populated by getToken; forwards the caller's GET/POST
    parameters to ``<api_server>/<method>`` with the token headers and returns
    the raw response body.
    """
    # emulating
    # ./resource_request seungjin e1463572-517a-41c7-a43c-5a3eb884562e GET http://bond.iplantcollaborative.org:8000/resources/v1/getImageList
    if request.session.has_key('username') == False:
        return HttpResponseForbidden('HTTP/1.0 401 UNAUTHORIZED')
    username = request.session['username']
    token = request.session['token']
    method_type = str(request.META['REQUEST_METHOD'])
    resource_url = request.session['api_server'] + "/" + method
    o = urlparse(resource_url)
    protocol = o.scheme
    # NOTE(review): assumes the service URL carries an explicit ':port' — confirm.
    url = string.split(o.netloc,":")[0]
    port = int(string.split(o.netloc,":")[1])
    path = o.path + "/"
    params = None
    # Re-encode the incoming query/form parameters for the upstream request.
    if str(method_type).upper() == "GET" :
        params = '&'.join( [ u"%s=%s"%(f,v) for f,v in request.GET.iteritems() if f])
    elif str(method_type).upper() == "POST":
        params = '&'.join( [ u"%s=%s"%(f,v) for f,v in request.POST.iteritems() if f])
    headers = {
        "Content-type" : "application/x-www-form-urlencoded",
        "Accept" : "text/plain",
        "X-Auth-User" : username,
        "X-Auth-Token" : token,
        "X-Api-Server" : request.session['api_server'] + "/",
        "X-Api-Version" : "v1",
        "User-Agent" : "Atmo/CloudFront"
    }
    logging.debug(params)
    conn = httplib.HTTPSConnection(url,port)
    # NOTE(review): always issues a POST upstream even when the client sent a
    # GET (method_type only selects the parameter source) — confirm intended.
    conn.request("POST",path,params,headers)
    r1 = conn.getresponse()
    return r1.read()
| 3,647 | 1,285 |
import json
import os
# Build one README.md per image class directory.
classlist = os.listdir("./image/")
for classname in classlist:
    # Bug fix: replaced a bare 'try: os.mkdir(...) except: pass' — which also
    # swallowed real errors such as permission failures — with an idempotent
    # makedirs call.
    os.makedirs("./Classification/" + classname + "/", exist_ok=True)
    filenamelist = os.listdir("./image/" + classname)
    # Base CDN url for this class's images.
    url = "https://cdn.jsdelivr.net/gh/2x-ercha/twikoo-magic/image/" + classname + "/"
    with open("./Classification/" + classname + "/README.md", "w", encoding="utf-8") as f:
        f.write(classname + "\n\n")
        for filename in filenamelist:
            # NOTE(review): only a blank line is written per image; 'url' and
            # 'filename' are never used — the link emission looks unfinished.
            f.write("\n")
| 554 | 203 |
#!/usr/bin/env python3
import sys
from multiprocessing import Queue,Process,Lock
from datetime import datetime
import getopt
import configparser
class Config(object):
    """Read salary-calculation settings from an ini-style config file.

    *arg* selects the section (a city name), defaulting to [DEFAULT].
    """
    def __init__(self, filename, arg='DEFAULT'):
        self._filename = filename
        self._arg = arg
        self._obj = configparser.ConfigParser(strict=False)
        self._obj.read(self._filename)

    @property
    def basel(self):
        """Lowest social-insurance contribution base (JiShuL)."""
        return self._obj.getfloat(self._arg, 'JiShuL')

    @property
    def baseh(self):
        """Highest social-insurance contribution base (JiShuH)."""
        return self._obj.getfloat(self._arg, 'JiShuH')

    @property
    def soinsurp(self):
        """Total social-insurance percentage: the sum of all individual rates."""
        rate_keys = ('YangLao', 'GongJiJin', 'ShengYu',
                     'GongShang', 'ShiYe', 'YiLiao')
        return sum(self._obj.getfloat(self._arg, key) for key in rate_keys)
class UserData(object):
    """Load 'name,salary' records from a plain-text file."""
    def __init__(self, userdatafile):
        self._userdatafile = userdatafile

    @property
    def userdata(self):
        """Return {name: salary} parsed from the file, one 'name,salary' per line."""
        result = {}
        with open(self._userdatafile) as handle:
            for record in handle:
                fields = record.split(',')
                result[fields[0].strip()] = float(fields[1].strip())
        return result
class Salary(object):
    """Compute social insurance, personal income tax and net salary.

    bftax    -- gross (pre-tax) salary
    soinsurp -- total social insurance percentage
    basel    -- lowest contribution base
    baseh    -- highest contribution base
    """
    def __init__(self, bftax, soinsurp, basel, baseh):
        self._bftax = bftax
        self._soinsurp = soinsurp
        self._basel = basel
        self._baseh = baseh

    @property
    def soinsur(self):
        """Social-insurance payment, with the base clamped to [basel, baseh]."""
        base = self._bftax
        if base <= self._basel:
            base = self._basel
        elif base >= self._baseh:
            base = self._baseh
        return base * self._soinsurp

    @property
    def pitax(self):
        """Personal income tax under the 3500-exemption progressive table."""
        taxbase = self._bftax - self.soinsur - 3500
        if taxbase <= 0:
            return 0
        # (upper bound, rate, quick deduction) per bracket, ascending
        brackets = (
            (1500, 0.03, 0),
            (4500, 0.1, 105),
            (9000, 0.2, 555),
            (35000, 0.25, 1005),
            (55000, 0.3, 2755),
            (80000, 0.35, 5505),
        )
        for upper, rate, deduction in brackets:
            if taxbase <= upper:
                return taxbase * rate - deduction
        return taxbase * 0.45 - 13505

    @property
    def aftax(self):
        """Net (after-tax) salary."""
        return self._bftax - self.soinsur - self.pitax
# Shared pipeline queues: que1 carries (name, gross-salary) pairs from the
# reader process; que2 carries computed salary rows to the writer process.
que1 = Queue()
que2 = Queue()
def putda_func(arg, lock):
    """Producer: read user data from file *arg* and push (name, salary) pairs onto que1."""
    reader = UserData(arg)
    for pair in reader.userdata.items():
        with lock:
            que1.put(pair)
def comp_func(soinsurp, basel, baseh, lock):
    """Worker: pop (name, gross) pairs off que1 and push salary rows onto que2.

    Each row is [name, gross, soinsur, pitax, aftax].
    """
    while True:
        name, gross = que1.get()
        salary = Salary(gross, soinsurp, basel, baseh)
        row = [name, gross, salary.soinsur, salary.pitax, salary.aftax]
        with lock:
            que2.put(row)
        # NOTE: empty() is inherently racy with a concurrent producer
        if que1.empty():
            break
def outfi_func(arg):
    """Consumer: append each computed row to csv file *arg* with a timestamp."""
    while True:
        row = que2.get()
        with open(arg, 'a') as out:
            out.write(row[0])
            for value in row[1:]:
                out.write(',' + '{:.2f}'.format(value))
            stamp = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
            out.write(',' + stamp)
            out.write('\n')
        if que2.empty():
            break
def usage():
    """Print the command-line usage string for this script."""
    print('Usage: ' + sys.argv[0] + ' -C cityname -c configfile -d userdata -o resultdata')
def main():
    """Parse the command line and wire up the three pipeline processes."""
    try:
        opts,args = getopt.getopt(sys.argv[1:],'ho:d:C:c:',['help',])
    except getopt.GetoptError as err:
        print(err)
        usage()
        sys.exit(2)
    # Defaults; the city falls back to the [DEFAULT] config section.
    cityname = 'DEFAULT'
    userfile = None
    configfile = None
    outfile = None
    try:
        for o,a in opts:
            if o in ('-h','--help'):
                usage()
                sys.exit()
            if o == '-o':
                outfile = a
            elif o == '-C':
                cityname = a
            elif o == '-d':
                userfile = a
            elif o == '-c':
                configfile = a
            else:
                # NOTE(review): unreachable — getopt only yields the options
                # declared in its optstring above
                raise NameError
        config = Config(configfile,cityname.upper())
        lo1 = Lock()
        lo2 = Lock()
        # pipeline: reader -> que1 -> computer -> que2 -> writer
        Process(target=putda_func,args=(userfile,lo1)).start()
        Process(target=comp_func, args=(config.soinsurp,\
                config.basel,config.baseh,lo2)).start()
        Process(target=outfi_func, args=(outfile,)).start()
    except NameError as err:
        usage()
        print(err)
        sys.exit(2)
if __name__ == '__main__':
    # Script entry point.
    main()
| 4,964 | 1,698 |
#!/usr/bin/env python3
import argparse
import yaml
import pathlib
import decimal
import datetime
import os
# Use 10 significant digits for all Decimal arithmetic below.
decimal.getcontext().prec = 10
parser = argparse.ArgumentParser()
parser.add_argument('--data', help='path to data directory', required=True)
args = parser.parse_args()
script_path = os.path.dirname(os.path.realpath(__file__))
config_path = script_path + '/../config'
# Configuration
config = {}
with open(config_path + '/tax.yaml') as f:
    config['tax'] = yaml.safe_load(f.read())
# Find current tax year: the entry whose [start_date, end_date] covers today.
today = datetime.date.today()
config['current_tax'] = next(x for x in config['tax'] if x['start_date'] <= today and x['end_date'] >= today)
# Data
total_sales = decimal.Decimal(0.00)
total_payments = decimal.Decimal(0.00)
data_directory = str(args.data)
data_path = pathlib.Path(data_directory)
invoice_files = list(data_path.glob('data/invoices/*.yaml'))
for invoice_file in invoice_files:
    fp = invoice_file.open()
    invoice_data = yaml.safe_load(fp.read())
    fp.close()
    # Only count invoices issued within the current tax year, up to today.
    if invoice_data['issue_date'] >= config['current_tax']['start_date'] and invoice_data['issue_date'] <= config['current_tax']['end_date'] and invoice_data['issue_date'] <= today:
        print(invoice_data['number'])
        total_sales += decimal.Decimal(invoice_data['total'])
        print(invoice_data['total'])
        # Subtract any payments from accounts receivable
        if 'payments' in invoice_data:
            for payment in invoice_data['payments']:
                print(payment['amount'])
                total_payments += decimal.Decimal(payment['amount'])
print()
print("Total sales: %.2f" % total_sales)
print("Total payments: %.2f" % total_payments)
# Calculate tax and national insurance
| 1,730 | 562 |
# -*- coding: utf-8 -*-
#for local
#import config
#config.write_environ()
import os,json
from flask import Flask, render_template, request, redirect, url_for, session
from requests_oauthlib import OAuth1Session
from datetime import timedelta
import twitter_auth
import twitter_delete
import postTweet
import databaseIO
app = Flask(__name__)
app.secret_key = os.environ['APP_SECRET_KEY']
# Sessions expire after five minutes.
app.permanent_session_lifetime = timedelta(minutes=5)
#session.permanent = True
#scheduler = BackgroundScheduler(daemon = True)
##################################################################
## Token-related settings
CK = os.environ.get('CONSUMER_KEY', '0')
CS = os.environ.get('CONSUMER_SECRET', '0')
##################################################################
# Module-level state shared across requests.
# NOTE(review): module globals are not per-user/request safe under a
# multi-threaded server — confirm this is acceptable.
is_verified = False
name = ""
screen_name = ""
# Display strings for the per-user 'work' flag (index 0 = stop, 1 = running).
w = ('stop','running')
@app.route('/')
def index():
    """Landing page: clear the auth flags and render the start page."""
    for flag in ('is_verified', 'auth_process'):
        session[flag] = False
    return render_template('index.html')
@app.route('/authorize')
def authorize():
    """Kick off the OAuth authorize flow and redirect the user to Twitter."""
    session['auth_process'] = True
    return redirect(twitter_auth.user_authorize())
@app.route('/authenticate')
def authenticate():
    """Kick off the OAuth authenticate flow and redirect the user to Twitter."""
    session['auth_process'] = True
    return redirect(twitter_auth.user_authenticate())
"""@app.route('/verified')
def verified():
is_verified,name,screen_name = twitter_auth.user_verified()
#return redirect('http://127.0.0.1:5000/')
return render_template('verified.html',is_verified = is_verified,name=name,screen_name=screen_name)
@app.route('/setting_authenticate')
def authenticate():
authenticate_url = twitter_auth.user_authenticate_setting()
return redirect(authenticate_url)
#return #render_template('tweet.html',message=message,title=title)
"""
@app.route('/setting', methods=['GET','POST'])
def setting():
    """Settings page: finishes OAuth verification and saves user preferences.

    Flow: after the OAuth redirect (auth_process set) the verification is
    completed; otherwise a POST from a verified user persists the run/stop
    flag and delete interval. Finally the page is rendered with the stored
    per-user settings.
    """
    global is_verified, name, screen_name
    user_id = ""
    # Normalize missing session flags to False.
    if session.get('is_verified') != True:
        session['is_verified'] = False
    if session.get('auth_process') != True:
        print("no auth_process")
        session['auth_process'] = False
    if session['auth_process'] == True :
        # Returning from Twitter: complete the verification handshake.
        try:
            twitter_auth.user_verified()
            print("verify success")
            session['auth_process'] = False
        except:
            print("verify failed")
            session['auth_process'] = False
            return render_template('setting.html',is_verified = False)
    else:
        if session['is_verified'] == True:
            # Form submission: persist run/stop flag and delete interval.
            if(request.form["work"]=='running'):
                work_value = 1
            else:
                work_value = 0
            databaseIO.set_value(session['user_id'], work_value, request.form["deletetime"])
            print(request.form["work"])
            print(request.form["deletetime"])
            #param = json.loads(request.data.decode('utf-8'))
            #print(param["work"])
            #print(param.get('deletetime'))
            print("verified")
        else:
            print("invalid transition")
            session['auth_process'] = False
            return render_template('setting.html',is_verified = False)
    # Render the page with the stored per-user settings.
    user_id = session['user_id']
    userinfo = databaseIO.get_value(user_id)
    is_verified = session['is_verified']
    name = session['name']
    screen_name = session['screen_name']
    # NOTE(review): assumes columns 3/4 of the user row are work flag and
    # delete interval — confirm against databaseIO's schema.
    work = userinfo[3]
    delete_time = userinfo[4]
    print(name)
    return render_template('setting.html',is_verified = is_verified,name=name,screen_name=screen_name,work=w[work],delete_time=delete_time)
@app.route('/delete', methods=['GET','POST'])
def delete():
    """Account-deletion page.

    POST removes the current user's record and confirms; GET (or any other
    method) just shows the confirmation form. The original had a duplicated,
    unreachable final return after the if/else — removed.
    """
    if request.method == 'POST':
        databaseIO.auth_deleteuser(session['user_id'])
        print("delete")
        return render_template('delete.html', deleted=True)
    return render_template('delete.html', deleted=False)
if __name__ == '__main__':
    #app.debug = True
    # threaded=True lets the development server handle concurrent requests
    app.run(threaded=True)
| 4,131 | 1,265 |
from PySide.QtGui import QKeySequence
from PySide.QtCore import Qt
from .menu import Menu, MenuEntry, MenuSeparator
class DisasmInsnContextMenu(Menu):
    """Right-click context menu for a single instruction in the disasm view."""

    def __init__(self, disasm_view):
        super(DisasmInsnContextMenu, self).__init__("", parent=disasm_view)
        # Address of the instruction the menu was opened on; set by the caller
        # before the menu is shown.
        self.insn_addr = None
        self.entries.append(MenuEntry('T&oggle selection', self._toggle_instruction_selection))
        self.entries.append(MenuSeparator())
        self.entries.append(MenuEntry('E&xecute symbolically...', self._disasm_view.popup_newpath_dialog))
        self.entries.append(MenuEntry('&Avoid in execution...', self._avoid_in_execution))

    @property
    def _disasm_view(self):
        """The owning disassembly view (stored as the menu's parent)."""
        return self.parent

    def _toggle_instruction_selection(self):
        """Toggle the selected state of the current instruction."""
        self._disasm_view.toggle_instruction_selection(self.insn_addr)

    def _avoid_in_execution(self):
        """Mark the current instruction address as 'avoid' during execution."""
        self._disasm_view.avoid_addr_in_exec(self.insn_addr)
| 882 | 279 |
"""
shades
contains classes and functions relating to Shades' shade object
"""
from abc import ABC, abstractmethod
from typing import Tuple, List
import numpy as np
from PIL import Image
from .noise_fields import NoiseField, noise_fields
from .utils import color_clamp
class Shade(ABC):
"""
An Abstract base clase Shade.
Methods are used to mark shapes onto images according to various color rules.
Initialisation parameters of warp_noise takes two noise_fields affecting how
much a point is moved across x and y axis.
warp_size determines the amount that a warp_noise result of 1 (maximum perlin
value) translates as
"""
def __init__(
self,
color: Tuple[int, int, int] = (0, 0, 0),
warp_noise: Tuple[NoiseField] = noise_fields(channels=2),
warp_size: float = 0,
):
self.color = color
self.warp_noise = warp_noise
self.warp_size = warp_size
    @abstractmethod
    def determine_shade(self, xy_coords: Tuple[int, int]) -> Tuple[int, int, int]:
        """
        Determines the shade/color for given xy coordinate.

        Implementations return an (r, g, b) tuple, or None to indicate the
        point should not be drawn (see point()).
        """
def adjust_point(self, xy_coords: Tuple[int, int]) -> Tuple[int, int]:
"""
If warp is applied in shade, appropriately adjusts location of point.
"""
if self.warp_size == 0:
return xy_coords
x_coord = xy_coords[0] + (self.warp_noise[0].noise(xy_coords) * self.warp_size)
y_coord = xy_coords[1] + (self.warp_noise[1].noise(xy_coords) * self.warp_size)
return (x_coord, y_coord)
def point(self, canvas: Image, xy_coords: Tuple[int, int]) -> None:
"""
Determines colour and draws a point on an image.
"""
color = self.determine_shade(xy_coords)
if color is None:
return
xy_coords = self.adjust_point(xy_coords)
if self.in_bounds(canvas, xy_coords):
canvas.putpixel((int(xy_coords[0]), int(xy_coords[1])), color)
def in_bounds(self, canvas: Image, xy_coords: Tuple[int, int]) -> bool:
"""
determined whether xy_coords are within the size of canvas image
"""
if (xy_coords[0] < 0) or (xy_coords[0] >= canvas.width):
return False
if (xy_coords[1] < 0) or (xy_coords[1] >= canvas.height):
return False
return True
def weighted_point(self, canvas: Image, xy_coords: Tuple[int, int], weight: int):
"""
Determines colour and draws a weighted point on an image.
"""
color = self.determine_shade(xy_coords)
if self.warp_size != 0:
xy_coords = self.adjust_point(xy_coords)
for x_coord in range(0, weight):
for y_coord in range(0, weight):
new_point = (int(xy_coords[0]+x_coord), int(xy_coords[1]+y_coord))
if self.in_bounds(canvas, new_point):
canvas.putpixel(new_point, color)
    def pixels_inside_edge(self, edge_pixels: List) -> List:
        """
        Returns a list of pixels from inside a edge of points using ray casting algorithm
        https://en.wikipedia.org/wiki/Point_in_polygon
        vertex correction requires improvements, unusual or particularly angular shapes may
        cause difficulties
        """
        inner_pixels = []
        x_coords = {i[0] for i in edge_pixels}
        # Cast a vertical ray down each column covered by the edge.
        for x_coord in range(min(x_coords), max(x_coords)+1):
            y_coords = {i[1] for i in edge_pixels if i[0] == x_coord}
            # Collapse vertical runs of edge pixels to their first pixel so a
            # thick edge counts as a single crossing.
            y_coords = [i for i in y_coords if i-1 not in y_coords]
            ray_count = 0
            for y_coord in range(min(y_coords), max(y_coords)+1):
                # NOTE(review): 'and (x_coord, y_coord)' is a non-empty tuple,
                # hence always truthy and a no-op — possibly meant to be a
                # membership test against edge_pixels; confirm before changing.
                if y_coord in y_coords and (x_coord, y_coord):
                    ray_count += 1
                # An odd number of crossings means we are inside the polygon.
                if ray_count % 2 == 1:
                    inner_pixels.append((x_coord, y_coord))
        return list(set(inner_pixels + edge_pixels))
def pixels_between_two_points(self, xy_coord_1: Tuple, xy_coord_2: Tuple) -> List:
"""
Returns a list of pixels that form a straight line between two points.
Parameters:
xy_coord_1 (int iterable): Coordinates for first point.
xy_coord_2 (int iterable): Coordinates for second point.
Returns:
pixels (int iterable): List of pixels between the two points.
"""
if abs(xy_coord_1[0] - xy_coord_2[0]) > abs(xy_coord_1[1] - xy_coord_2[1]):
if xy_coord_1[0] > xy_coord_2[0]:
x_step = -1
else:
x_step = 1
y_step = (abs(xy_coord_1[1] - xy_coord_2[1]) / abs(xy_coord_1[0] - xy_coord_2[0]))
if xy_coord_1[1] > xy_coord_2[1]:
y_step *= -1
i_stop = abs(xy_coord_1[0] - xy_coord_2[0])
else:
if xy_coord_1[1] > xy_coord_2[1]:
y_step = -1
else:
y_step = 1
x_step = (abs(xy_coord_1[0] - xy_coord_2[0]) / abs(xy_coord_1[1] - xy_coord_2[1]))
if xy_coord_1[0] > xy_coord_2[0]:
x_step *= -1
i_stop = abs(xy_coord_1[1]-xy_coord_2[1])
pixels = []
x_coord, y_coord = xy_coord_1
for _ in range(0, int(i_stop) + 1):
pixels.append((int(x_coord), int(y_coord)))
x_coord += x_step
y_coord += y_step
return pixels
def line(
self,
canvas: Image,
xy_coords_1: Tuple[int, int],
xy_coords_2: Tuple[int, int],
weight: int = 2,
) -> None:
"""
Draws a weighted line on the image.
"""
for pixel in self.pixels_between_two_points(xy_coords_1, xy_coords_2):
self.weighted_point(canvas, pixel, weight)
def fill(self, canvas: Image) -> None:
"""
Fills the entire image with color.
"""
# we'll temporarily turn off warping as it isn't needed here
warp_size_keeper = self.warp_size
self.warp_size = 0
for x_coord in range(0, canvas.width):
for y_coord in range(0, canvas.height):
self.point(canvas, (x_coord, y_coord))
#[[self.point(canvas, (x, y)) for x in range(0, canvas.width)]
# for y in range(0, canvas.height)]
self.warp_size = warp_size_keeper
def get_shape_edge(self, list_of_points: List[Tuple[int, int]]) -> List[Tuple]:
"""
Returns list of coordinates making up the edge of a shape
"""
edge = self.pixels_between_two_points(
list_of_points[-1], list_of_points[0])
for i in range(0, len(list_of_points)-1):
edge += self.pixels_between_two_points(
list_of_points[i], list_of_points[i+1])
return edge
def shape(self, canvas: Image, points: List[Tuple[int, int]]) -> None:
"""
Draws a shape on an image based on a list of points.
"""
edge = self.get_shape_edge(points)
for pixel in self.pixels_inside_edge(edge):
self.point(canvas, pixel)
def shape_outline(
self,
canvas: Image,
points: List[Tuple[int, int]],
weight: int = 2,
) -> None:
"""
Draws a shape outline on an image based on a list of points.
"""
for pixel in self.get_shape_edge(points):
self.weighted_point(canvas, pixel, weight)
def rectangle(
self,
canvas: Image,
top_corner: Tuple[int, int],
width: int,
height: int,
) -> None:
"""
Draws a rectangle on the image.
"""
for x_coord in range(top_corner[0], top_corner[0] + width):
for y_coord in range(top_corner[1], top_corner[1] + height):
self.point(canvas, (x_coord, y_coord))
def square(
self,
canvas: Image,
top_corner: Tuple[int, int],
size: int,
) -> None:
"""
Draws a square on the canvas
"""
self.rectangle(canvas, top_corner, size, size)
def triangle(
self,
canvas,
xy1: Tuple[int, int],
xy2: Tuple[int, int],
xy3: Tuple[int, int],
) -> None:
"""
Draws a triangle on the image.
This is the same as calling Shade.shape with a list of three points.
"""
self.shape(canvas, [xy1, xy2, xy3])
def triangle_outline(
self,
canvas,
xy1: Tuple[int, int],
xy2: Tuple[int, int],
xy3: Tuple[int, int],
weight: int = 2,
) -> None:
"""
Draws a triangle outline on the image.
Note that this is the same as calling Shade.shape_outline with a list of three points.
"""
self.shape_outline(canvas, [xy1, xy2, xy3], weight)
def get_circle_edge(
self,
center: Tuple[int, int],
radius: int,
) -> List[Tuple[int, int]]:
"""
Returns the edge coordinates of a circle
"""
edge_pixels = []
circumference = radius * 2 * np.pi
for i in range(0, int(circumference)+1):
angle = (i/circumference) * 360
opposite = np.sin(np.radians(angle)) * radius
adjacent = np.cos(np.radians(angle)) * radius
point = (int(center[0] + adjacent), int(center[1] + opposite))
edge_pixels.append(point)
return edge_pixels
def circle(
self,
canvas: Image,
center: Tuple[int, int],
radius: int,
) -> None:
"""
Draws a circle on the image.
"""
edge_pixels = self.get_circle_edge(center, radius)
for pixel in self.pixels_inside_edge(edge_pixels):
self.point(canvas, pixel)
def circle_outline(
self,
canvas: Image,
center: Tuple[int, int],
radius: int,
weight: int = 2,
) -> None:
"""
Draws a circle outline on the image.
"""
edge_pixels = self.get_circle_edge(center, radius)
for pixel in edge_pixels:
self.weighted_point(canvas, pixel, weight)
    def circle_slice(
        self,
        canvas: Image,
        center: Tuple[int, int],
        radius: int,
        start_angle: int,
        degrees_of_slice: int,
    ) -> None:
        """
        Draws a partial circle based on degrees.
        (will have the appearance of a 'pizza slice' or 'pacman' depending on degrees).

        Parameters:
            canvas: image to draw on
            center: xy coordinates of the circle's center
            radius: circle radius in pixels
            start_angle: angle in degrees at which the slice starts
            degrees_of_slice: angular size of the slice in degrees
        """
        # due to Shade.pixels_between_two_points vertex correction issues,
        # breaks down shape into smaller parts
        def _internal(canvas, center, radius, start_angle, degrees_of_slice):
            # Walk the circumference in roughly 1-pixel steps; start_angle is
            # shifted by -90 degrees before mapping to circumference position.
            # NOTE(review): the modulus 361 looks like it should be 360 — confirm.
            circumference = radius * 2 * np.pi
            start_point = int(
                (((start_angle - 90) % 361) / 360) * circumference)
            slice_length = int((degrees_of_slice / 360) * circumference)
            end_point = start_point + slice_length
            edge_pixels = []
            for i in range(start_point, end_point + 1):
                angle = (i/circumference) * 360
                opposite = np.sin(np.radians(angle)) * radius
                adjacent = np.cos(np.radians(angle)) * radius
                point = (int(center[0] + adjacent), int(center[1] + opposite))
                edge_pixels.append(point)
                if i in [start_point, end_point]:
                    # Close the slice: join the first and last arc pixels to
                    # the center so the ray-casting fill sees a closed edge.
                    edge_pixels += self.pixels_between_two_points(point, center)
            for pixel in self.pixels_inside_edge(edge_pixels):
                self.point(canvas, pixel)
        # Slices over 180 degrees are drawn as two half slices, since the
        # edge-fill machinery copes poorly with reflex angles.
        if degrees_of_slice > 180:
            _internal(canvas, center, radius, start_angle, 180)
            _internal(canvas, center, radius, start_angle +
                      180, degrees_of_slice - 180)
        else:
            _internal(canvas, center, radius, start_angle, degrees_of_slice)
class BlockColor(Shade):
    """
    Shade that always paints with its configured colour, with no variation.
    """
    def determine_shade(self, xy_coords: Tuple[int, int]) -> Tuple[int, int, int]:
        """
        Return the configured colour; the coordinates play no role here.
        """
        return self.color
class NoiseGradient(Shade):
    """
    Type of shade that will produce varying gradient based on noise fields.
    Unique Parameters:
        color_variance: How much noise is allowed to affect the color from the central shade
        color_fields: A noise field for each channel (r,g,b)
    """
    def __init__(
        self,
        color: Tuple[int, int, int] = (0, 0, 0),
        warp_noise: Tuple[NoiseField, NoiseField, NoiseField] = None,
        warp_size: int = 0,
        color_variance: int = 70,
        color_fields: Tuple[NoiseField, NoiseField, NoiseField] = None,
    ):
        # BUG FIX: default expressions such as `noise_fields(channels=3)` are
        # evaluated once at class-definition time, so every instance would
        # share the same NoiseField objects. None sentinels build fresh
        # fields per instance instead.
        if warp_noise is None:
            warp_noise = noise_fields(channels=3)
        if color_fields is None:
            color_fields = noise_fields(channels=3)
        super().__init__(color, warp_noise, warp_size)
        self.color_variance = color_variance
        self.color_fields = tuple(color_fields)
    def determine_shade(self, xy_coords: Tuple[int, int]) -> Tuple[int, int, int]:
        """
        Measures noise from coordinates and affects color based upon return.
        """
        def apply_noise(i):
            # Recenter noise around zero (presumably NoiseField.noise returns
            # values in [0, 1] — confirm) and scale to +/- color_variance.
            noise = self.color_fields[i].noise(xy_coords) - 0.5
            color_affect = noise * (2 * self.color_variance)
            return self.color[i] + color_affect
        return color_clamp([apply_noise(i) for i in range(len(self.color))])
class DomainWarpGradient(Shade):
    """
    Type of shade that will produce varying gradient based on recursive noise fields.
    Unique Parameters:
        color_variance: How much noise is allowed to affect the color from the central shade
        color_fields: A noise field for each channel (r,g,b)
        depth: Number of recursions within noise to make
        feedback: Affect of recursive calls, recomended around 0-2
    """
    def __init__(
        self,
        color: Tuple[int, int, int] = (0, 0, 0),
        warp_noise: Tuple[NoiseField, NoiseField] = None,
        warp_size: int = 0,
        color_variance: int = 70,
        color_fields: Tuple[NoiseField, NoiseField, NoiseField] = None,
        depth: int = 2,
        feedback: float = 0.7,
    ):
        # BUG FIX: `noise_fields(...)` defaults were evaluated once at
        # class-definition time and therefore shared between all instances;
        # None sentinels create fresh fields per instance.
        if warp_noise is None:
            warp_noise = noise_fields(channels=2)
        if color_fields is None:
            color_fields = noise_fields(channels=3)
        super().__init__(color, warp_noise, warp_size)
        self.color_variance = color_variance
        self.color_fields = tuple(color_fields)
        self.depth = depth
        self.feedback = feedback
    def determine_shade(self, xy_coords: Tuple[int, int]) -> Tuple[int, int, int]:
        """
        Determines shade based on xy coordinates.
        """
        def apply_noise(i):
            # Recursive noise recentred around zero, scaled to +/- color_variance.
            noise = self.color_fields[i].recursive_noise(
                xy_coords, self.depth, self.feedback) - 0.5
            color_affect = noise * (2 * self.color_variance)
            return self.color[i] + color_affect
        return color_clamp([apply_noise(i) for i in range(len(self.color))])
class SwirlOfShades(Shade):
    """
    Type of shade that will select from list of other shades based on recursive noise field.
    Unique Parameters:
        swirl_field: a NoiseField from which the selection of the shade is made
        depth: Number of recursive calls to make from swirl_field.noise (defaults to 0)
        feedback: Affect of recursive calls from swirl_field.noise
        shades: this one is very specific, and determines when shades are used.
            must be list of tuples of this form:
            (lower_bound, upper_bound, Shade)
    because the 'shades' arguments potentially confusing, here's an example.
    The below will color white when noise of 0 - 0.5 is returned, and black if noise of 0.5 - 1
    [(0, 0.5, shades.BlockColor((255, 255, 255)), (0.5, 1, shades.BlockColor((0, 0, 0)))]
    """
    def __init__(
        self,
        shades: List[Tuple[float, float, Shade]],
        warp_noise: Tuple[NoiseField, NoiseField] = None,
        warp_size: int = 0,
        color_variance: int = 70,
        swirl_field: NoiseField = None,
        depth: int = 1,
        feedback: float = 0.7,
    ):
        # BUG FIX: `noise_fields(...)` / `NoiseField()` defaults were
        # evaluated once at class-definition time and shared between all
        # instances; None sentinels create fresh objects per instance.
        if warp_noise is None:
            warp_noise = noise_fields(channels=2)
        if swirl_field is None:
            swirl_field = NoiseField()
        super().__init__(warp_noise=warp_noise, warp_size=warp_size)
        self.color_variance = color_variance
        self.swirl_field = swirl_field
        self.depth = depth
        self.feedback = feedback
        self.shades = shades
    def determine_shade(self, xy_coords: Tuple[int, int]):
        """
        Determines shade based on xy coordinates.

        Returns the colour from the first shade whose [lower, upper) band
        contains the noise value, or None (draw nothing) when no band matches.
        """
        noise = self.swirl_field.recursive_noise(xy_coords, self.depth, self.feedback)
        for lower_bound, upper_bound, shade in self.shades:
            if lower_bound <= noise < upper_bound:
                return shade.determine_shade(xy_coords)
        return None
class LinearGradient(Shade):
    """
    Type of shade that will determine color based on transition between various 'color_points'
    Unique Parameters:
        color_points: (color, position) pairs giving the colour and the
            coordinate (on the chosen axis) at which it should appear
        axis: 0 for horizontal gradient, 1 for vertical
    Here's an example of color_points
    in this, anything before 50 (on whichever axis specified) will be black,
    anything after 100 will be white
    between 50 and 100 will be grey, with tone based on proximity to 50 or 100
    [((0, 0, 0), 50), ((250, 250, 250), 100)]
    """
    def __init__(
        self,
        color_points: List[Tuple[Tuple[int, int, int], int]],
        axis: int = 0,
        warp_noise: Tuple[NoiseField, NoiseField] = None,
        warp_size: int = 0,
    ):
        # None sentinel avoids sharing one noise_fields() result (evaluated
        # once at class-definition time) across every instance.
        if warp_noise is None:
            warp_noise = noise_fields(channels=2)
        super().__init__(warp_noise=warp_noise, warp_size=warp_size)
        self.color_points = color_points
        self.axis = axis
    def determine_shade(self, xy_coords):
        """
        Determines shade based on xy coordinates.

        Parameters:
            xy_coords (iterable): xy coordinates

        Returns:
            color in form of tuple; positions before the first / after the
            last color point are clamped to that point's colour.
        """
        position = xy_coords[self.axis]
        larger = [point[1] for point in self.color_points if point[1] >= position]
        smaller = [point[1] for point in self.color_points if point[1] < position]
        if not smaller:
            next_item = min(larger)
            return [p[0] for p in self.color_points if p[1] == next_item][0]
        if not larger:
            last_item = max(smaller)
            return [p[0] for p in self.color_points if p[1] == last_item][0]
        next_item = min(larger)
        last_item = max(smaller)
        next_color = [p[0] for p in self.color_points if p[1] == next_item][0]
        last_color = [p[0] for p in self.color_points if p[1] == last_item][0]
        distance_from_next = abs(next_item - position)
        distance_from_last = abs(last_item - position)
        from_last_to_next = distance_from_last / (distance_from_next + distance_from_last)
        # BUG FIX: original was `[0 for i in len(next_color)]`, which raises
        # TypeError ('int' object is not iterable) on every interpolated pixel.
        color = [0] * len(next_color)
        for i, _ in enumerate(next_color):
            color_difference = (last_color[i] - next_color[i]) * from_last_to_next
            color[i] = last_color[i] - color_difference
        return color_clamp(color)
class VerticalGradient(LinearGradient):
    """
    Type of shade that will determine color based on transition between various 'color_points'
    along the y axis.
    Unique Parameters:
        color_points: (color, position) pairs giving the colour and the y
            coordinate at which it should appear
    Here's an example of color_points
    in this, anything before 50 (on y axis) will be black,
    anything after 100 will be white
    between 50 and 100 will be grey, with tone based on proximity to 50 or 100
    """
    def __init__(
        self,
        color_points: List[Tuple[Tuple[int, int, int], int]],
        warp_noise: Tuple[NoiseField, NoiseField] = None,
        warp_size: int = 0,
    ):
        # None sentinel avoids sharing one noise_fields() result (evaluated
        # once at class-definition time) across every instance.
        if warp_noise is None:
            warp_noise = noise_fields(channels=2)
        super().__init__(
            color_points=color_points,
            axis=1,
            warp_noise=warp_noise,
            warp_size=warp_size,
        )
class HorizontalGradient(LinearGradient):
    """
    Type of shade that will determine color based on transition between various 'color_points'
    along the x axis.
    Unique Parameters:
        color_points: (color, position) pairs giving the colour and the x
            coordinate at which it should appear
    Here's an example of color_points
    in this, anything before 50 (on x axis) will be black,
    anything after 100 will be white
    between 50 and 100 will be grey, with tone based on proximity to 50 or 100
    """
    def __init__(self,
                 color_points: List[Tuple[Tuple[int, int, int], int]],
                 warp_noise: Tuple[NoiseField, NoiseField] = None,
                 warp_size: int = 0,
                 ):
        # None sentinel avoids sharing one noise_fields() result (evaluated
        # once at class-definition time) across every instance.
        if warp_noise is None:
            warp_noise = noise_fields(channels=2)
        super().__init__(
            color_points=color_points,
            axis=0,
            warp_noise=warp_noise,
            warp_size=warp_size,
        )
| 21,209 | 6,890 |
def CA(file):
    """Run a correspondence analysis (CA) on a table of genes' RSCU values.

    Args:
        file (str or path-like): path to a csv file of RSCU values
            (rows = codons, first column = codon names, columns = genes).

    Side effects:
        - writes "<stem>genes.csv": genes' values on the first 4 CA axes
        - writes "<stem>codons.csv": codons' values on the first 4 CA axes
        - appends explained/total inertia to "<stem>.txt"
        - saves scatter plots of the first 2 axes (genes and codons) as png

    Returns:
        pandas.DataFrame: the genes csv read back from disk.
    """
    import pandas as pd
    import prince
    import matplotlib.pyplot as plt
    file = str(file)
    df = pd.read_csv(file)
    df.set_index(df.iloc[:,0] , inplace=True)# use the first column (codon names) as the index
    df.drop(df.columns[0], axis=1,inplace= True)
    df.replace(0,0.0000001,inplace=True)# CA cannot handle zeros, replace with a tiny value
    # fit a 4-component correspondence analysis with prince
    ca = prince.CA(
        n_components=4,
        n_iter=3,
        copy=True,
        check_input=True,
        engine='auto',
        random_state=42
    )
    df.columns.rename('Gene Name', inplace=True)
    df.index.rename('Codons', inplace=True)
    ca = ca.fit(df)
    codons = ca.row_coordinates(df) # rows of df are codons
    genes = ca.column_coordinates(df) # columns of df are genes
    ca.total_inertia_ #total inertia (no-op expression statement, kept as-is)
    ca.explained_inertia_ #inertia for each axis (no-op expression statement)
    inertia = ca.explained_inertia_
    # save gene coordinates to "<stem>genes.csv"
    file_genes = file.replace(".csv",'')
    file_genes = file_genes + "genes"
    file_genes = file_genes + ".csv"
    genes.rename(columns={genes.columns[0]: 'axis 1', genes.columns[1]: 'axis 2', genes.columns[2]: 'axis 3', genes.columns[3]: 'axis 4'}, inplace=True)
    genes.to_csv(file_genes,sep=',', index=True, header=True) # csv file for genes ca result
    # save codon coordinates to "<stem>codons.csv"
    file_codons = file.replace(".csv",'')
    file_codons = file_codons+ "codons"
    file_codons = file_codons + ".csv"
    codons.rename(columns={codons.columns[0]: 'axis 1', codons.columns[1]: 'axis 2', codons.columns[2]: 'axis 3', codons.columns[3]: 'axis 4'},inplace=True)
    codons.to_csv(file_codons, sep=',', index=True, header=True) # csv file for codon ca result
    # append per-axis explained inertia and total inertia to a txt report
    file_inertia = file.replace('.csv','.txt')
    with open(file_inertia, 'a') as f:
        f.write("explained inertia" + "\n")
    for i in range(len(inertia)):
        i_count = i + 1
        with open(file_inertia,'a') as f:
            f.write ("axis " + str(i_count) + " = " + str(inertia[i]) + "\n" )
    with open(file_inertia,'a') as f:
        f.write("Total Inertia = " + str(ca.total_inertia_))
    # scatter plot of genes on the first two CA axes
    # NOTE(review): 'seaborn-dark-palette' was renamed in matplotlib >= 3.6
    # (to 'seaborn-v0_8-dark-palette'); confirm the matplotlib version in use.
    plt.style.use('seaborn-dark-palette')
    fig = plt.figure()
    plt.xlabel("Axis 1")
    plt.ylabel("Axis 2")
    plt.title("CA-plot")
    plt.scatter(genes['axis 1'],genes['axis 2'],s=10,marker ='o')
    plt.axhline(0, color='black', linestyle='-')
    plt.axvline(0, color='black', linestyle='-')
    save_file_name__ca_plot = file + "_CA_gens_plot.png"
    plt.savefig(save_file_name__ca_plot) # plot file for gene ca result
    # scatter plot of codons on the first two CA axes
    plt.style.use('seaborn-dark-palette')
    fig3 = plt.figure()
    plt.xlabel("Axis 1")
    plt.ylabel("Axis 2")
    plt.title("CA-plot")
    plt.scatter(codons['axis 1'],codons['axis 2'], s=10,marker ='o')
    plt.axhline(0, color='black', linestyle='-')
    plt.axvline(0, color='black', linestyle='-')
    if len(codons) < 200:
        # label each codon point, nudged 1% away from the marker
        for x , y , t in zip(codons['axis 1'],codons['axis 2'] , codons.index.values):
            x = x * (1 + 0.01)
            y = y * (1 + 0.01)
            plt.text(x,y,t)
    file = file.replace('.csv','')
    save_file_name__ca_codons_plot = file + "_CA_codos_plot.png"
    plt.savefig(save_file_name__ca_codons_plot) # plot file for codon ca result
    read_genes_file = pd.read_csv(file_genes)
    # NOTE(review): `genes.columns` was already renamed above to
    # 'axis 1'..'axis 4', so this maps 'axis 1'->'gene id', 'axis 2'->'axis 1',
    # 'axis 3'->'axis 2', shifting the column labels — confirm intent.
    read_genes_file.rename(columns={genes.columns[0]: 'gene id', genes.columns[1]: 'axis 1', genes.columns[2]: 'axis 2'}, inplace=True)
    return read_genes_file
| 4,104 | 1,594 |
"""
Main RenderChan package
"""
| 32 | 12 |
import sys
import os
import shutil
sys.path.append('.')
import chia_rep
def test_filter_peaks():
    """Filtering to 60 peaks on chr1 should leave exactly 60 per sample."""
    sample_dict = chia_rep.read_data('test/sample_input_file.txt',
                                     'test/test_files/hg38.chrom.sizes',
                                     output_dir='test/output')
    for sample_obj in sample_dict.values():
        sample_obj.filter_peaks(60, 'chr1')
        assert len(sample_obj.peak_dict['chr1']) == 60
def test_package():
    """End-to-end run driven by a compare-list file; checks expected outputs."""
    bin_size = 5000
    window_size = 3000000
    # Start from a clean output directory. ignore_errors makes the test
    # runnable on a fresh checkout where test/output does not exist yet
    # (plain rmtree would raise FileNotFoundError).
    shutil.rmtree('test/output', ignore_errors=True)
    sample_dict = chia_rep.read_data('test/sample_input_file.txt',
                                     'test/test_files/hg38.chrom.sizes',
                                     output_dir='test/output')
    chia_rep.preprocess(sample_dict, output_dir='test/output')
    emd_scores, j_scores = chia_rep.compare(sample_dict, 'all',
                                            compare_list_file='test/pairs.txt',
                                            bin_size=bin_size,
                                            window_size=window_size,
                                            output_dir='test/output')
    chia_rep.output_to_csv(emd_scores, j_scores, window_size, bin_size, 'all',
                           output_dir='test/output')
    # Per-sample loop and peak files.
    for sample in ('sampleA1', 'sampleA2', 'sampleB1'):
        assert os.path.isfile(f'test/output/loops/{sample}.all.loops')
        assert os.path.isfile(f'test/output/peaks/{sample}.all.peaks')
    param = f'{window_size}.{bin_size}.all'
    assert os.path.isfile(f'test/output/{param}/scores/emd_complete.csv')
    assert os.path.isfile(f'test/output/{param}/scores/j_complete.csv')
    assert os.path.isfile(f'test/output/timings/comparison.{param}.txt')
    assert os.path.isfile(f'test/output/timings/read_data.txt')
    # Per-pair window and chromosome score files.
    for first, second in (('sampleA1', 'sampleA2'),
                          ('sampleA1', 'sampleB1'),
                          ('sampleA2', 'sampleB1')):
        assert os.path.isfile(
            f'test/output/{param}/scores/windows/{first}_{second}_chr1.txt')
        assert os.path.isfile(
            f'test/output/{param}/scores/chromosomes/{first}_{second}.txt')
def test_package2():
    """End-to-end run driven by an in-memory compare list; checks outputs."""
    bin_size = 5000
    window_size = 3000000
    # Start from a clean output directory; ignore_errors keeps this runnable
    # on a fresh checkout where test/output does not exist yet.
    shutil.rmtree('test/output', ignore_errors=True)
    sample_dict = chia_rep.read_data('test/sample_input_file.txt',
                                     'test/test_files/hg38.chrom.sizes',
                                     output_dir='test/output')
    chia_rep.preprocess(sample_dict, output_dir='test/output')
    comparison_list = [
        ['sampleA1', 'sampleA2'],
        ['sampleA1', 'sampleB1'],
        ['sampleA2', 'sampleB1']
    ]
    emd_scores, j_scores = chia_rep.compare(sample_dict, 'all',
                                            compare_list=comparison_list,
                                            bin_size=bin_size,
                                            window_size=window_size,
                                            output_dir='test/output')
    chia_rep.output_to_csv(emd_scores, j_scores, window_size, bin_size, 'all',
                           output_dir='test/output')
    # Per-sample loop and peak files.
    for sample in ('sampleA1', 'sampleA2', 'sampleB1'):
        assert os.path.isfile(f'test/output/loops/{sample}.all.loops')
        assert os.path.isfile(f'test/output/peaks/{sample}.all.peaks')
    param = f'{window_size}.{bin_size}.all'
    assert os.path.isfile(f'test/output/{param}/scores/emd_complete.csv')
    assert os.path.isfile(f'test/output/{param}/scores/j_complete.csv')
    assert os.path.isfile(f'test/output/timings/comparison.{param}.txt')
    assert os.path.isfile(f'test/output/timings/read_data.txt')
    # Per-pair window and chromosome score files.
    for first, second in (('sampleA1', 'sampleA2'),
                          ('sampleA1', 'sampleB1'),
                          ('sampleA2', 'sampleB1')):
        assert os.path.isfile(
            f'test/output/{param}/scores/windows/{first}_{second}_chr1.txt')
        assert os.path.isfile(
            f'test/output/{param}/scores/chromosomes/{first}_{second}.txt')
| 5,110 | 1,796 |
# Jacqueline Kory Westlund
# May 2016
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Personal Robots Group
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PySide import QtGui # basic GUI stuff
from jibo_msgs.msg import JiboAction # ROS msgs
from jibo_teleop_ros import jibo_teleop_ros
from functools import partial
class jibo_animation_ui(QtGui.QWidget):
    """Widget with one button per Jibo animation plus a hold-last-frame toggle."""
    # List of animations for Jibo. Not all the SILENT animations are here.
    animations = [
        JiboAction.EMOJI_SHARK,
        JiboAction.EMOJI_BEER,
        JiboAction.EMOJI_PARTY_PINK,
        JiboAction.EMOJI_PARTY_BLUE,
        JiboAction.EMOJI_RAINCLOUD,
        JiboAction.HAPPY_GO_LUCKY_DANCE
    ]
    def __init__(self, ros_node):
        """ Make a button for each animation """
        super(jibo_animation_ui, self).__init__()
        # get reference to ros node so we can do callbacks to publish messages
        self.ros_node = ros_node
        # tracks whether jibo will hold the last frame of an animation; False by default
        self.hold_last_frame = False
        # put buttons in a box
        anim_box = QtGui.QGroupBox(self)
        anim_layout = QtGui.QGridLayout(anim_box)
        anim_box.setTitle("Animations")
        # create animation buttons and add to layout
        col = 0
        row = 1
        for anim in self.animations:
            button = QtGui.QPushButton(anim.lower().replace("\"", ""), anim_box)
            button.clicked.connect(partial(self.ros_node.send_motion_message, anim))
            # if in the top left, make button green
            if (col < 3 and row < 7):
                button.setStyleSheet('QPushButton {color: green;}')
            # if in top right, make button red
            if (col > 2 and row < 3):
                button.setStyleSheet('QPushButton {color: red;}')
            anim_layout.addWidget(button, row, col)
            col += 1
            if (col >= 4):  # four animation buttons per row (comment fixed; said "ten")
                col = 0
                row += 1
        # set button to toggle Hold Last Frame
        row += 1
        self.anim_trans_button = QtGui.QPushButton("Turn Hold-Last-Frame ON", anim_box)
        self.anim_trans_button.setStyleSheet('QPushButton {color: green;}')
        self.anim_trans_button.clicked.connect(self.on_hold_last_frame_pressed)
        anim_layout.addWidget(self.anim_trans_button, row, 0)
    def on_hold_last_frame_pressed(self):
        """Toggle whether Jibo holds the final frame of each animation."""
        if self.hold_last_frame:
            # switching back to the default state; next press re-enables holding
            # BUG FIX: the labels previously contained a stray leading double
            # quote ('"Turn Hold-Last-Frame ...').
            self.anim_trans_button.setText("Turn Hold-Last-Frame ON")
            self.anim_trans_button.setStyleSheet('QPushButton {color: green;}')
            self.ros_node.send_anim_transition_message(JiboAction.ANIMTRANS_RESET)
        else:
            self.anim_trans_button.setText("Turn Hold-Last-Frame OFF")
            self.anim_trans_button.setStyleSheet('QPushButton {color: red;}')
            self.ros_node.send_anim_transition_message(JiboAction.ANIMTRANS_KEEP_LASTFRAME)
        self.hold_last_frame = not self.hold_last_frame  # flip state to reflect button press
    def on_stop_record(self):
        # NOTE(review): self.record_button and on_start_record are not defined
        # anywhere in this class's visible code — confirm they exist elsewhere.
        print("Stop Recording")
        self.record_button.clicked.disconnect()
        self.record_button.clicked.connect(self.on_start_record)
| 4,343 | 1,389 |
# Generated by Django 4.0.1 on 2022-01-11 19:00
from django.db import migrations, models
import django.db.models.deletion
import scans.models
class Migration(migrations.Migration):
    # Auto-generated initial migration: defines the Host, Scan, ScanPolicy and
    # Service tables for the scans app (nmap scan import/tracking).
    initial = True
    dependencies = [
    ]
    operations = [
        # Host: a scanned network host with OS fingerprint and asset metadata.
        migrations.CreateModel(
            name='Host',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hostname', models.CharField(blank=True, default='', max_length=100)),
                ('hostname_type', models.CharField(blank=True, default='', max_length=50, verbose_name='Host Type')),
                ('ip_address', models.GenericIPAddressField(protocol='IPv4', verbose_name='IP Address')),
                ('mac_address', models.CharField(blank=True, default='', max_length=30, verbose_name='MAC Address')),
                ('assessment_status_1', models.CharField(default='New', max_length=50, verbose_name='Test Status')),
                ('os_name', models.CharField(blank=True, default='', max_length=100)),
                ('os_family', models.CharField(blank=True, default='', max_length=100)),
                ('os_vendor', models.CharField(blank=True, default='', max_length=100)),
                ('os_gen', models.CharField(blank=True, default='', max_length=100)),
                ('os_type', models.CharField(blank=True, default='', max_length=100)),
                ('state', models.CharField(choices=[('up', 'Live'), ('dn', 'Down')], help_text='Host is live or down', max_length=2)),
                ('state_reason', models.CharField(blank=True, default='', max_length=100, verbose_name='state reason')),
                ('category', models.CharField(blank=True, default='', max_length=100)),
                ('criticality', models.SmallIntegerField(default=50, help_text='Asset importance/criticality score 1-100', verbose_name='Asset Criticality')),
                ('date_discovered', models.DateTimeField(auto_now=True, help_text='Date first seen/scanned')),
                ('date_last_seen', models.DateTimeField(auto_now=True, help_text='Date last seen/scanned')),
                ('count_scanned', models.SmallIntegerField(help_text='Number of times this host has been scanned')),
            ],
            options={
                'verbose_name': 'host',
                'verbose_name_plural': 'hosts',
                'ordering': ('ip_address',),
            },
        ),
        # Scan: one imported nmap run (XML file, timing, and metadata).
        migrations.CreateModel(
            name='Scan',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique_for_date='date_created', verbose_name='name')),
                ('arguments', models.TextField()),
                ('scan_start', models.DateTimeField(verbose_name='Scan Start')),
                ('scan_end', models.DateTimeField(verbose_name='Scan End')),
                ('duration', models.DurationField()),
                ('nmap_version', models.CharField(max_length=20)),
                ('xml_version', models.CharField(max_length=20)),
                ('count_live_hosts', models.IntegerField(verbose_name='Live Hosts')),
                ('scan_file', models.FileField(help_text='Nmap XML file', upload_to=scans.models.set_files_path, verbose_name='scan file')),
                ('scan_md5', models.CharField(help_text='MD5 hash of the XML file used for this scan record', max_length=32, verbose_name='MD5')),
                ('notes', models.TextField(blank=True, default='')),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_modified', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'scan',
                'verbose_name_plural': 'scans',
                'ordering': ('id',),
            },
        ),
        # ScanPolicy: a named, reusable set of nmap arguments.
        migrations.CreateModel(
            name='ScanPolicy',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=75, unique=True, verbose_name='name')),
                ('scan_type', models.CharField(default='Discovery', max_length=50, verbose_name='scan type')),
                ('arguments', models.TextField(help_text='nmap scan command arguments', verbose_name='arguments')),
                ('output_filename', models.CharField(default='nmap-scan-date.xml', max_length=150, verbose_name='output filename')),
                ('notes', models.TextField(blank=True, default='')),
            ],
            options={
                'verbose_name': 'scan policy',
                'verbose_name_plural': 'scan policies',
                'ordering': ('scan_type', 'name'),
            },
        ),
        # Service: a port/service observed on a Host during a Scan.
        migrations.CreateModel(
            name='Service',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('port_number', models.IntegerField(verbose_name='port')),
                ('port_proto', models.CharField(max_length=10, verbose_name='protocol')),
                ('service_name', models.CharField(blank=True, default='', max_length=255, verbose_name='service name')),
                ('product_name', models.CharField(blank=True, default='', max_length=255, verbose_name='product name')),
                ('product_version', models.CharField(blank=True, default='', max_length=50, verbose_name='product version')),
                ('product_extrainfo', models.TextField(blank=True, default='', help_text='Extra service info and/or raw scan output')),
                ('assessment_status_1', models.CharField(default='New', max_length=50, verbose_name='Test Status')),
                ('state', models.CharField(choices=[('up', 'Live'), ('dn', 'Down')], max_length=2, verbose_name='state')),
                ('state_reason', models.CharField(blank=True, default='', max_length=100, verbose_name='state reason')),
                ('category', models.CharField(blank=True, default='', max_length=100)),
                ('attack_value', models.SmallIntegerField(default=0, help_text='Attack value score 1-100', verbose_name='attack value')),
                ('notes', models.TextField(blank=True, default='')),
                ('date_discovered', models.DateTimeField(auto_now=True, help_text='Date first seen/scanned')),
                ('date_last_seen', models.DateTimeField(auto_now=True, help_text='Date last seen/scanned')),
                ('count_scanned', models.SmallIntegerField(default=1, help_text='Number of times this host & port has been scanned')),
                ('host_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scans.host', verbose_name='Host ID')),
                ('scan_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scans.scan', verbose_name='Scan ID')),
            ],
            options={
                'verbose_name': 'service',
                'verbose_name_plural': 'services',
                'ordering': ('port_number',),
            },
        ),
        # Link each Host back to the Scan that discovered it (PROTECT so a
        # Scan with Hosts cannot be deleted).
        migrations.AddField(
            model_name='host',
            name='scan_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='scans.scan', verbose_name='Scan ID'),
        ),
    ]
| 7,470 | 2,100 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# **
#
# ======================== #
# CHECK_RESERVATIONS_COMMAND #
# ======================== #
# Command for checking reservations.
#
# @author ES
# **
import logging
from collections import OrderedDict
from es_common.command.es_command import ESCommand
from es_common.enums.command_enums import ActionCommand
class CheckReservationsCommand(ESCommand):
    """Action command for checking reservations (execution not yet implemented)."""
    def __init__(self, is_speech_related=False):
        super(CheckReservationsCommand, self).__init__(is_speech_related=is_speech_related)
        self.logger = logging.getLogger("GetReservations Command")
        self.command_type = ActionCommand.CHECK_RESERVATIONS
    # =======================
    # Override Parent methods
    # =======================
    def execute(self):
        """Run the command; returns True on success, False otherwise.

        Currently a stub that only logs.
        """
        success = False
        try:
            self.logger.info("Not implemented!")
        except Exception as e:
            self.logger.error("Error while checking the reservation! {}".format(e))
        # BUG FIX: the original returned from a `finally` block, an
        # anti-pattern that silently swallows any in-flight exception.
        return success
    def reset(self):
        """Nothing to reset for this command."""
        pass
    def clone(self):
        """Return a fresh instance of this command.

        NOTE(review): the clone does not carry over is_speech_related —
        confirm that is intended.
        """
        return CheckReservationsCommand()
    ###
    # SERIALIZATION
    ###
    def serialize(self):
        """Serialize to an ordered mapping of id and command type name."""
        return OrderedDict([
            ("id", self.id),
            ("command_type", self.command_type.name)
        ])
    def deserialize(self, data, hashmap=None):
        """Restore state from `data` and register self in `hashmap` by id.

        BUG FIX: the mutable default `hashmap={}` was created once and shared
        across every call; a None sentinel gives each call its own dict.
        """
        if hashmap is None:
            hashmap = {}
        self.id = data["id"]
        hashmap[data["id"]] = self
        return True
| 1,447 | 424 |
# coding: utf-8
from email.mime.text import MIMEText
from email.parser import Parser
import os
import pytest
@pytest.fixture
def debugsmtp(request, tmpdir):
    """Yield a DebuggingServer storing mails below `tmpdir`; closed on teardown."""
    from mr.hermes import DebuggingServer
    server = DebuggingServer(('localhost', 0), ('localhost', 0))
    server.path = str(tmpdir)
    yield server
    server.close()
@pytest.fixture
def debugsmtp_thread(debugsmtp):
    """Run the asyncore event loop in a background thread for one test."""
    import asyncore
    import threading
    loop_thread = threading.Thread(target=asyncore.loop, kwargs={"timeout": 1})
    loop_thread.start()
    yield loop_thread
    # Closing the server empties the asyncore socket map, letting loop() return.
    debugsmtp.close()
    loop_thread.join()
@pytest.fixture
def sendmail(debugsmtp, debugsmtp_thread):
    """Return a helper that delivers one message to the debug SMTP server."""
    def _send(msg):
        import smtplib
        host, port = debugsmtp.socket.getsockname()
        client = smtplib.SMTP(host, port)
        client.sendmail(msg['From'], [msg['To']], msg.as_string())
        client.quit()
    return _send
@pytest.fixture
def email_msg():
    """Build a small UTF-8 plain-text message with fixed test headers."""
    message = MIMEText(u'Söme text', 'plain', 'utf-8')
    headers = {
        'Subject': 'Testmail',
        'From': 'sender@example.com',
        'To': 'receiver@example.com',
    }
    for name, value in headers.items():
        message[name] = value
    return message
def test_mails_filename_order(debugsmtp):
    """Mails delivered in sequence are stored under lexically ordered filenames.

    `os.listdir` returns entries in arbitrary, OS-dependent order, so the
    filenames must be sorted explicitly before reading the stored mails back;
    the original iterated the raw listing, making the assertion flaky.
    """
    me = 'bar@example.com'
    you = 'foo@example.com'
    for i in range(10):
        msg = MIMEText('Mail%02i.' % i)
        msg['Subject'] = 'Test'
        msg['From'] = me
        msg['To'] = you
        debugsmtp.process_message(('localhost', 0), me, [you], msg.as_string())
    mail_content = []
    path = os.path.join(debugsmtp.path, 'foo@example.com')
    for filename in sorted(os.listdir(path)):
        with open(os.path.join(path, filename)) as f:
            msg = Parser().parsestr(f.read())
            mail_content.append(msg.get_payload())
    assert mail_content == [
        'Mail00.', 'Mail01.', 'Mail02.', 'Mail03.', 'Mail04.',
        'Mail05.', 'Mail06.', 'Mail07.', 'Mail08.', 'Mail09.']
def test_functional(sendmail, email_msg, tmpdir):
    """End-to-end check: a sent mail ends up as an .eml file per recipient."""
    sendmail(email_msg)
    # Exactly one recipient directory is created, named after the To address.
    (recipient_dir,) = tmpdir.listdir()
    assert recipient_dir.basename == 'receiver@example.com'
    # It contains exactly one stored message file.
    (stored_msg,) = recipient_dir.listdir()
    assert stored_msg.basename.endswith('.eml')
    with stored_msg.open() as f:
        parsed = Parser().parsestr(f.read())
    decoded_body = parsed.get_payload(decode=True).decode(parsed.get_content_charset())
    assert parsed['Subject'] == 'Testmail'
    assert parsed['From'] == 'sender@example.com'
    assert parsed['To'] == 'receiver@example.com'
    assert u'Söme text' in decoded_body
| 2,471 | 871 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import uuid
import warnings
import importlib
import traceback
import subprocess
from io import StringIO
from pathlib import Path
from functools import partial
from multiprocessing import cpu_count
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import yaml
import numpy
import requests
from pandas import DataFrame, Int64Dtype, isnull, isna, read_csv, NA
from tqdm import tqdm
from .anomaly import detect_anomaly_all, detect_stale_columns
from .cast import column_convert
from .concurrent import process_map
from .net import download_snapshot
from .io import read_file, fuzzy_text, export_csv
from .utils import (
ROOT,
CACHE_URL,
combine_tables,
drop_na_records,
filter_output_columns,
infer_new_and_total,
stratify_age_and_sex,
)
class DataSource:
    """
    Interface for data sources. A data source consists of a series of steps performed in the
    following order:
    1. Fetch: download resources into raw data
    1. Parse: convert raw data to structured format
    1. Merge: associate each record with a known `key`
    The default implementation of a data source includes the following functionality:
    * Fetch: downloads raw data from a list of URLs into ../snapshots folder. See [lib.net].
    * Merge: outputs a key from the auxiliary dataset after performing best-effort matching.
    The merge function provided here is crucial for many sources that use it. The easiest/fastest
    way to merge records is by providing the exact `key` that will match an existing record in the
    [data/metadata.csv] file.
    """

    # Source-specific options, typically the corresponding entry from a pipeline's config.yaml
    config: Dict[str, Any]

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        super().__init__()
        self.config = config or {}

    def fetch(
        self, output_folder: Path, cache: Dict[str, str], fetch_opts: List[Dict[str, Any]]
    ) -> List[str]:
        """
        Downloads the required resources and returns a list of local paths.
        Args:
            output_folder: Root folder where snapshot, intermediate and tables will be placed.
            cache: Map of data sources that are stored in the cache layer (used for daily-only).
            fetch_opts: Additional options defined in the DataPipeline config.yaml.
        Returns:
            List[str]: List of absolute paths where the fetched resources were stored, in the same
            order as they are defined in `config`.
        """
        return [
            download_snapshot(source_config["url"], output_folder, **source_config.get("opts", {}))
            for source_config in fetch_opts
        ]

    def _read(self, file_paths: List[str], **read_opts) -> List[DataFrame]:
        """ Reads a raw file input path into a DataFrame """
        return [read_file(file_path, **read_opts) for file_path in file_paths]

    def parse(self, sources: List[str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
        """ Parses a list of raw data records into a DataFrame. """
        # Some read options are passed as parse_opts
        read_opts = {k: v for k, v in parse_opts.items() if k in ("sep",)}
        return self.parse_dataframes(self._read(sources, **read_opts), aux, **parse_opts)

    def parse_dataframes(
        self, dataframes: List[DataFrame], aux: Dict[str, DataFrame], **parse_opts
    ) -> DataFrame:
        """ Parse the inputs into a single output dataframe """
        # Subclasses must implement this to convert the raw inputs into the output schema
        raise NotImplementedError()

    def merge(self, record: Dict[str, Any], aux: Dict[str, DataFrame]) -> Optional[str]:
        """
        Outputs a key used to merge this record with the datasets.
        The key must be present in the `aux` DataFrame index.

        Matching proceeds from cheapest to most expensive strategy: exact key,
        column-wise filtering, fuzzy subregion code/name, fuzzy/exact match
        string and, as a last resort, regex patterns stored in the metadata
        `match_string` column. Returns None when no unambiguous match is found.
        """
        # Merge only needs the metadata auxiliary data table
        metadata = aux["metadata"]
        # Exact key match might be possible and it's the fastest option
        if "key" in record and not isnull(record["key"]):
            if record["key"] in metadata["key"].values:
                return record["key"]
            else:
                warnings.warn("Key provided but not found in metadata: {}".format(record))
                return None
        # Start by filtering the auxiliary dataset as much as possible
        for column_prefix in ("country", "subregion1", "subregion2"):
            for column_suffix in ("code", "name"):
                column = "{}_{}".format(column_prefix, column_suffix)
                if column not in record:
                    continue
                elif isnull(record[column]):
                    # A null in the record requires the metadata column to be null as well
                    metadata = metadata[metadata[column].isna()]
                elif record[column]:
                    # NOTE(review): falsy non-null values (e.g. empty string) skip
                    # filtering entirely here — presumably intentional; confirm.
                    metadata = metadata[metadata[column] == record[column]]
        # Auxiliary dataset might have a single record left, then we are done
        if len(metadata) == 1:
            return metadata.iloc[0]["key"]
        # Compute a fuzzy version of the record's match string for comparison
        match_string = fuzzy_text(record["match_string"]) if "match_string" in record else None
        # Provided match string could be a subregion code / name
        # (the "*_fuzzy" columns are precomputed by DataPipeline.run)
        if match_string is not None:
            for column_prefix in ("subregion1", "subregion2"):
                for column_suffix in ("code", "name"):
                    column = "{}_{}".format(column_prefix, column_suffix)
                    aux_match = metadata[column + "_fuzzy"] == match_string
                    if sum(aux_match) == 1:
                        return metadata[aux_match].iloc[0]["key"]
        # Provided match string could be identical to `match_string` (or with simple fuzzy match)
        if match_string is not None:
            aux_match_1 = metadata["match_string_fuzzy"] == match_string
            if sum(aux_match_1) == 1:
                return metadata[aux_match_1].iloc[0]["key"]
            aux_match_2 = metadata["match_string"] == record["match_string"]
            if sum(aux_match_2) == 1:
                return metadata[aux_match_2].iloc[0]["key"]
        # Last resort is to match the `match_string` column with a regex from aux
        if match_string is not None:
            aux_mask = ~metadata["match_string"].isna()
            aux_regex = metadata["match_string"][aux_mask].apply(
                lambda x: re.compile(x, re.IGNORECASE)
            )
            for search_string in (match_string, record["match_string"]):
                aux_match = aux_regex.apply(lambda x: True if x.match(search_string) else False)
                if sum(aux_match) == 1:
                    metadata = metadata[aux_mask]
                    return metadata[aux_match].iloc[0]["key"]
        # Uncomment when debugging mismatches
        # print(aux_regex)
        # print(match_string)
        # print(record)
        # print(metadata)
        # raise ValueError()
        warnings.warn("No key match found for:\n{}".format(record))
        return None

    def run(
        self,
        output_folder: Path,
        cache: Dict[str, str],
        aux: Dict[str, DataFrame],
        skip_existing: bool = False,
    ) -> DataFrame:
        """
        Executes the fetch, parse and merge steps for this data source.
        Args:
            output_folder: Root folder where snapshot, intermediate and tables will be placed.
            cache: Map of data sources that are stored in the cache layer (used for daily-only).
            aux: Map of auxiliary DataFrames used as part of the processing of this DataSource.
            skip_existing: Flag indicating whether to use the locally stored snapshots if possible.
        Returns:
            DataFrame: Processed data, with columns defined in config.yaml corresponding to the
            DataPipeline that this DataSource is part of.
        """
        data: DataFrame = None
        # Insert skip_existing flag to fetch options if requested
        fetch_opts = self.config.get("fetch", [])
        if skip_existing:
            for opt in fetch_opts:
                opt["opts"] = {**opt.get("opts", {}), "skip_existing": True}
        # Fetch the data, feeding the cached resources to the fetch step
        # (`data` holds the list of local file paths at this point)
        data = self.fetch(output_folder, cache, fetch_opts)
        # Make yet another copy of the auxiliary table to avoid affecting future steps in `parse`
        parse_opts = self.config.get("parse", {})
        data = self.parse(data, {name: df.copy() for name, df in aux.items()}, **parse_opts)
        # Merge expects for null values to be NaN (otherwise grouping does not work as expected)
        data.replace([None], numpy.nan, inplace=True)
        # Merging is done record by record, but can be sped up if we build a map first aggregating
        # by the non-temporal fields and only matching the aggregated records with keys
        merge_opts = self.config.get("merge", {})
        key_merge_columns = [
            col for col in data if col in aux["metadata"].columns and len(data[col].unique()) > 1
        ]
        if not key_merge_columns or (merge_opts and merge_opts.get("serial")):
            # Slow path: resolve the key for every single record
            data["key"] = data.apply(lambda x: self.merge(x, aux), axis=1)
        else:
            # "_nan_magic_number" replacement necessary to work around
            # https://github.com/pandas-dev/pandas/issues/3729
            # This issue will be fixed in Pandas 1.1
            _nan_magic_number = -123456789
            grouped_data = (
                data.fillna(_nan_magic_number)
                .groupby(key_merge_columns)
                .first()
                .reset_index()
                .replace([_nan_magic_number], numpy.nan)
            )
            # Build a _vec column used to merge the key back from the groups into data
            make_key_vec = lambda x: "|".join([str(x[col]) for col in key_merge_columns])
            grouped_data["_vec"] = grouped_data.apply(make_key_vec, axis=1)
            data["_vec"] = data.apply(make_key_vec, axis=1)
            # Iterate only over the grouped data to merge with the metadata key
            grouped_data["key"] = grouped_data.apply(lambda x: self.merge(x, aux), axis=1)
            # Merge the grouped data which has key back with the original data
            if "key" in data.columns:
                data = data.drop(columns=["key"])
            data = data.merge(grouped_data[["key", "_vec"]], on="_vec").drop(columns=["_vec"])
        # Drop records which have no key merged
        # TODO: log records with missing key somewhere on disk
        data = data.dropna(subset=["key"])
        # Filter out data according to the user-provided filter function
        if "query" in self.config:
            data = data.query(self.config["query"])
        # Get the schema of our index table, necessary for processing to infer which columns in the
        # data belong to the index and should not be aggregated
        index_schema = DataPipeline.load("index").schema
        # Provide a stratified view of certain key variables
        if any(stratify_column in data.columns for stratify_column in ("age", "sex")):
            data = stratify_age_and_sex(data, index_schema)
        # Process each record to add missing cumsum or daily diffs
        data = infer_new_and_total(data, index_schema)
        # Return the final dataframe
        return data
class DataPipeline:
    """
    A pipeline chain is a collection of individual [DataSource]s which produce a full table
    ready for output. This is a very thin wrapper that runs the data pipelines and combines their
    outputs.
    One of the reasons for a dedicated class is to allow for discovery of [DataPipeline] objects
    via reflection, users of this class are encouraged to override its methods if custom processing
    is required.
    A pipeline chain is responsible for loading the auxiliary datasets that are passed to the
    individual pipelines. Pipelines can load data themselves, but if the same auxiliary dataset
    is used by many of them it is more efficient to load it here.
    """

    schema: Dict[str, Any]
    """ Names and corresponding dtypes of output columns """

    data_sources: List[Tuple[DataSource, Dict[str, Any]]]
    """ List of <data source, option> tuples executed in order """

    auxiliary_tables: Dict[str, Union[Path, str]] = {
        "metadata": ROOT / "src" / "data" / "metadata.csv"
    }
    """ Auxiliary datasets passed to the pipelines during processing """

    def __init__(
        self,
        schema: Dict[str, type],
        auxiliary: Dict[str, Union[Path, str]],
        data_sources: List[Tuple[DataSource, Dict[str, Any]]],
    ):
        super().__init__()
        self.schema = schema
        # User-provided auxiliary tables are layered on top of the defaults
        self.auxiliary_tables = {**self.auxiliary_tables, **auxiliary}
        self.data_sources = data_sources

    @staticmethod
    def load(name: str):
        """
        Load a DataPipeline by name from src/pipelines/<name>/config.yaml, instantiating
        every data source class listed under `sources` via its fully qualified name.
        """
        config_path = ROOT / "src" / "pipelines" / name / "config.yaml"
        with open(config_path, "r") as fd:
            config_yaml = yaml.safe_load(fd)
        schema = {
            name: DataPipeline._parse_dtype(dtype) for name, dtype in config_yaml["schema"].items()
        }
        auxiliary = {name: ROOT / path for name, path in config_yaml.get("auxiliary", {}).items()}
        pipelines = []
        for pipeline_config in config_yaml["sources"]:
            # "name" is a dotted path such as pipelines.x.y.SourceClass
            module_tokens = pipeline_config["name"].split(".")
            class_name = module_tokens[-1]
            module_name = ".".join(module_tokens[:-1])
            module = importlib.import_module(module_name)
            pipelines.append(getattr(module, class_name)(pipeline_config))
        return DataPipeline(schema, auxiliary, pipelines)

    @staticmethod
    def _parse_dtype(dtype_name: str) -> type:
        """ Convert a dtype name from config.yaml into a pandas-compatible dtype. """
        if dtype_name == "str":
            return str
        if dtype_name == "int":
            # Nullable integer dtype so missing values survive the conversion
            return Int64Dtype()
        if dtype_name == "float":
            return float
        raise TypeError(f"Unsupported dtype: {dtype_name}")

    def output_table(self, data: DataFrame) -> DataFrame:
        """
        This function performs the following operations:
        1. Filters out columns not in the output schema
        2. Converts each column to the appropriate type
        3. Sorts the values based on the column order
        4. Outputs the resulting data
        """
        output_columns = list(self.schema.keys())
        # Make sure all columns are present and have the appropriate type
        for column, dtype in self.schema.items():
            if column not in data:
                data[column] = None
            data[column] = column_convert(data[column], dtype)
        # Filter only output columns and output the sorted data
        return drop_na_records(data[output_columns], ["date", "key"]).sort_values(output_columns)

    @staticmethod
    def _run_wrapper(
        output_folder: Path,
        cache: Dict[str, str],
        aux: Dict[str, DataFrame],
        data_source: DataSource,
    ) -> Optional[DataFrame]:
        """ Workaround necessary for multiprocess pool, which does not accept lambda functions """
        try:
            return data_source.run(output_folder, cache, aux)
        except Exception:
            # A failing source must not abort the whole pipeline; the caller falls
            # back to the last successful intermediate output for this source.
            data_source_name = data_source.__class__.__name__
            warnings.warn(
                f"Error running data source {data_source_name} with config {data_source.config}"
            )
            traceback.print_exc()
            return None

    def run(
        self,
        pipeline_name: str,
        output_folder: Path,
        process_count: int = cpu_count(),
        verify: str = "simple",
        progress: bool = True,
    ) -> DataFrame:
        """
        Main method which executes all the associated [DataSource] objects and combines their
        outputs.
        Args:
            pipeline_name: Name used for progress labels and warnings.
            output_folder: Root folder where snapshot, intermediate and tables will be placed.
            process_count: Worker count; <= 1 runs the data sources serially.
            verify: Anomaly detection level: "simple", "full", or anything else to skip.
            progress: Whether to display progress bars.
        Returns:
            DataFrame: Combined output of all data sources, cast to the pipeline schema.
        """
        # Read the cache directory from our cloud storage.
        # Best-effort: a missing cache is not fatal. The original bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit, making the process hard to stop here.
        try:
            cache = requests.get("{}/sitemap.json".format(CACHE_URL)).json()
        except Exception:
            cache = {}
            warnings.warn("Cache unavailable")
        # Read the auxiliary input files into memory
        aux = {name: read_file(file_name) for name, file_name in self.auxiliary_tables.items()}
        # Precompute some useful transformations in the auxiliary input files
        aux["metadata"]["match_string_fuzzy"] = aux["metadata"].match_string.apply(fuzzy_text)
        for category in ("country", "subregion1", "subregion2"):
            for suffix in ("code", "name"):
                column = "{}_{}".format(category, suffix)
                aux["metadata"]["{}_fuzzy".format(column)] = aux["metadata"][column].apply(
                    fuzzy_text
                )
        # Get all the pipeline outputs
        # This operation is parallelized but output order is preserved
        # Make a copy of the auxiliary table to prevent modifying it for everyone, but this way
        # we allow for local modification (which might be wanted for optimization purposes)
        aux_copy = {name: df.copy() for name, df in aux.items()}
        # Create a function to be used during mapping. The nestedness is an unfortunate outcome of
        # the multiprocessing module's limitations when dealing with lambda functions, coupled with
        # the "sandboxing" we implement to ensure resiliency.
        run_func = partial(DataPipeline._run_wrapper, output_folder, cache, aux_copy)
        # If the process count is less than one, run in series (useful to evaluate performance)
        data_sources_count = len(self.data_sources)
        progress_label = f"Run {pipeline_name} pipeline"
        if process_count <= 1 or data_sources_count <= 1:
            map_func = tqdm(
                map(run_func, self.data_sources),
                total=data_sources_count,
                desc=progress_label,
                disable=not progress,
            )
        else:
            map_func = process_map(
                run_func, self.data_sources, desc=progress_label, disable=not progress
            )
        # Save all intermediate results (to allow for reprocessing)
        intermediate_outputs = output_folder / "intermediate"
        intermediate_outputs_files = []
        for data_source, result in zip(self.data_sources, map_func):
            # Derive a deterministic file name from the source class and its config so a
            # re-run of the same source overwrites the same intermediate file
            data_source_class = data_source.__class__
            data_source_config = str(data_source.config)
            source_full_name = f"{data_source_class.__module__}.{data_source_class.__name__}"
            intermediate_name = uuid.uuid5(
                uuid.NAMESPACE_DNS, f"{source_full_name}.{data_source_config}"
            )
            intermediate_file = intermediate_outputs / f"{intermediate_name}.csv"
            intermediate_outputs_files += [intermediate_file]
            if result is not None:
                export_csv(result, intermediate_file)
        # Reload all intermediate results from disk
        # In-memory results are discarded, this ensures reproducibility and allows for data sources
        # to fail since the last successful intermediate result will be used in the combined output
        pipeline_outputs = []
        for source_output in intermediate_outputs_files:
            try:
                pipeline_outputs += [read_file(source_output)]
            except Exception as exc:
                warnings.warn(f"Failed to read intermediate file {source_output}. Error: {exc}")
        # Get rid of all columns which are not part of the output to speed up data combination
        pipeline_outputs = [
            source_output[filter_output_columns(source_output.columns, self.schema)]
            for source_output in pipeline_outputs
        ]
        # Combine all pipeline outputs into a single DataFrame
        if not pipeline_outputs:
            warnings.warn("Empty result for pipeline chain {}".format(pipeline_name))
            data = DataFrame(columns=self.schema.keys())
        else:
            progress_label = pipeline_name if progress else None
            data = combine_tables(pipeline_outputs, ["date", "key"], progress_label=progress_label)
        # Return data using the pipeline's output parameters
        data = self.output_table(data)
        # Skip anomaly detection unless requested
        if verify == "simple":
            # Validate that the table looks good
            detect_anomaly_all(self.schema, data, [pipeline_name])
        if verify == "full":
            # Perform stale column detection for each known key
            map_iter = data.key.unique()

            def stale_check(key):
                # Check all records sharing a single key for stale columns
                return detect_stale_columns(
                    self.schema, data[data.key == key], (pipeline_name, key)
                )

            progress_label = f"Verify {pipeline_name} pipeline"
            if process_count <= 1 or len(map_iter) <= 1:
                map_func = tqdm(
                    map(stale_check, map_iter),
                    total=len(map_iter),
                    desc=progress_label,
                    disable=not progress,
                )
            else:
                map_func = process_map(
                    stale_check, map_iter, desc=progress_label, disable=not progress
                )
            # Consume the results. Both branches above already display progress via
            # desc/disable; the original re-wrapped map_func in a second tqdm here,
            # which produced a duplicate progress bar and has been removed.
            _ = list(map_func)
        return data
| 21,967 | 5,910 |