# Copyright (c) 2013 Riccardo Lucchese, riccardo.lucchese at gmail.com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
#
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
#
# 3. This notice may not be removed or altered from any source
# distribution.
import math
import numpy
import matplotlib
import matplotlib.pyplot as plt
import networkx as nx
from math2D import *
class Target():
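    """A point-like target moving in the plane.

    Keeps the current position, a trajectory history and a unique id
    (auto-incremented unless an explicit targetid is given).
    """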
_target_id = 0
    def __init__(self, pos=None, targetid=None):
        # Use None as the default to avoid a shared mutable Point2D instance:
        # step() mutates self._pos in place, so a class-level default would be
        # shared by every Target created without an explicit position.
        if pos is None:
            pos = Point2D(1, 1)
        assert isinstance(pos, Point2D)
        self._pos = pos
self._traj = []
# set the target id
Target._target_id += 1
if targetid:
self._id = targetid
else:
self._id = Target._target_id
def step(self, time, dt, walls=[]):
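        """Advance the target along a fixed elliptical path; walls are ignored."""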
assert numeric_real(time)
assert numeric_real(dt)
assert dt > 0
        f = 0.01  # angular frequency of the elliptical motion
self._pos.x = 9.5/2. + 3.5*math.cos(time*f + math.pi)
self._pos.y = 3 + 2.5*math.sin(time*f + math.pi)
def pos(self):
return self._pos
def id(self):
return self._id
def plot(self, axis):
pos = self._pos.tuple()
if 0:
axis.add_patch(matplotlib.patches.Circle(pos, radius=0.1, alpha=0.5))
for line in self._traj:
assert isinstance(line, Line2D)
axis.add_line(matplotlib.lines.Line2D([line.p1.x, line.p2.x],[line.p1.y, line.p2.y], color=(1,0.,0.), alpha=1))#, zorder=-100))
class RandomTarget(Target):
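    """A target performing a Gaussian random walk, resampling any step that
    would cross a wall."""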
def __init__(self, pos):
Target.__init__(self, pos)
def step(self, cur_time, step, walls=[]):
        assert numeric_real(cur_time)
assert numeric_real(step)
assert step > 0
loop = True
while loop:
old_point = Point2D(self._pos.x, self._pos.y)
dx, dy = numpy.random.normal(0, 0.25, 2)
newx = numpy.clip(self._pos.x + dx, 0, 9)
newy = numpy.clip(self._pos.y + dy, 0, 6)
new_point = Point2D(newx, newy)
line = Line2D(old_point, new_point)
loop = False
for wall in walls:
if line.intersects(wall):
#print "RandomTarget intersected wall ", wall
                    # oops, we bumped into a wall, retry :)
loop = True
break
self._pos.x = newx
self._pos.y = newy
class GraphTargetBase(Target):
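    """Base class for targets that move between waypoints on a transition graph.

    Subclasses fill in self._graph and implement plan(), which selects the
    next waypoint; step() then moves toward it at constant speed.
    """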
def __init__(self, targetid=None):
Target.__init__(self, Point2D(0,0), targetid)
self._graph = nx.Graph()
self._graph.position = {}
self._cur_node = None
self._target_pos = None
self._moving = False
def step(self, cur_time, step, walls=[]):
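        """Move at constant speed toward the planned waypoint; replan on arrival."""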
assert numeric_real(cur_time)
assert numeric_real(step)
assert step > 0
if self._moving:
VEL = 0.075
STEP = VEL*step
cur_pos = self._pos
line = Line2D(cur_pos, self._target_pos)
if line.norm() < STEP:
newx = self._target_pos.x
newy = self._target_pos.y
self._moving = False
else:
dx = self._target_pos.x - cur_pos.x
dy = self._target_pos.y - cur_pos.y
dx = dx*(STEP/line.norm())
dy = dy*(STEP/line.norm())
newx = cur_pos.x + dx
newy = cur_pos.y + dy
self._pos.x = newx
self._pos.y = newy
else:
self.plan(walls)
self._moving = True
self.step(cur_time, step, walls)
def plot(self, axis):
# Plot the target badge and trajectory first
Target.plot(self, axis)
# debug the transition graph
if 0:
#node_pos = []
#for v in self._graph.nodes():
# p = self._graph.position[v]
# node_pos.append(p)
node_pos = [self._graph.position[v] for v in self._graph.nodes()]
nx.draw_networkx_edges(self._graph, self._graph.position, self._graph.edges(), edge_color='y', alpha=0.25, ax=axis)
nx.draw_networkx_nodes(self._graph, self._graph.position, self._graph.nodes(), 200, node_color='r', ax=axis)
nx.draw_networkx_labels(self._graph, self._graph.position, ax=axis)
class MarkovTarget(GraphTargetBase):
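    """A target that wanders over a hand-built graph, picking the next node
    uniformly at random among the neighbours of the current one."""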
def __init__(self, targetid=None):
GraphTargetBase.__init__(self, targetid)
# build the transition graph
self._graph.add_node(1)
self._graph.position[1] = (7.75, 5.25)
self._graph.add_node(2)
self._graph.position[2] = (6.5, 5.25)
self._graph.add_node(3)
self._graph.position[3] = (5.75, 4)
self._graph.add_node(4)
self._graph.position[4] = (5, 4.75)
self._graph.add_node(5)
self._graph.position[5] = (3, 5.25)
self._graph.add_node(6)
self._graph.position[6] = (1.75, 5.5)
self._graph.add_node(7)
self._graph.position[7] = (1.5, 4.75)
self._graph.add_node(8)
self._graph.position[8] = (1.75, 3)
self._graph.add_node(10)
self._graph.position[10] = (1.5, 1.25)
self._graph.add_node(11)
self._graph.position[11] = (3, 1.)
self._graph.add_node(12)
self._graph.position[12] = (4, 2)
self._graph.add_node(13)
self._graph.position[13] = (4.5, 1)
self._graph.add_node(14)
self._graph.position[14] = (5.75, 2)
self._graph.add_node(15)
self._graph.position[15] = (7, 1.)
self._graph.add_node(16)
self._graph.position[16] = (8, 1.25)
self._graph.add_node(17)
self._graph.position[17] = (8.25, 2)
self._graph.add_node(18)
self._graph.position[18] = (7.5, 4.)
self._graph.add_edge(1,2)
self._graph.add_edge(2,3)
self._graph.add_edge(2,4)
self._graph.add_edge(3,4)
self._graph.add_edge(4,5)
self._graph.add_edge(4,7)
self._graph.add_edge(5,6)
self._graph.add_edge(5,7)
self._graph.add_edge(6,7)
self._graph.add_edge(7,8)
self._graph.add_edge(7,10)
self._graph.add_edge(8,10)
self._graph.add_edge(10,11)
self._graph.add_edge(11,12)
self._graph.add_edge(11,13)
self._graph.add_edge(12,13)
self._graph.add_edge(12,14)
self._graph.add_edge(13,14)
self._graph.add_edge(13,15)
self._graph.add_edge(14,3)
self._graph.add_edge(14,13)
self._graph.add_edge(14,15)
self._graph.add_edge(15,16)
self._graph.add_edge(15,17)
self._graph.add_edge(16,17)
self._graph.add_edge(17,18)
self._graph.add_edge(17,1)
self._graph.add_edge(18,1)
self._cur_node = 10
self._pos = Point2D(*self._graph.position[self._cur_node])
def plan(self, walls):
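        """Pick a random neighbouring node and a noisy, wall-free point around
        it, and store that point as the next waypoint."""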
loop = True
old_point = self._pos
neighbors = self._graph[self._cur_node].keys()
while loop:
# select the next node
next = neighbors[numpy.random.randint(len(neighbors))]
xc, yc = self._graph.position[next]
            # 3 and 14 are the nodes at the entry/exit of the passage;
            # we use a smaller variance there to avoid bumping into the
            # lateral walls of the passage
sigma2 = 0.175
if next in [3,14]:
sigma2 = 0.1
for i in xrange(0,10):
dx, dy = numpy.random.normal(0, sigma2, 2)
newx = numpy.clip(xc + dx, 0, 9)
newy = numpy.clip(yc + dy, 0, 6)
new_point = Point2D(newx, newy)
line = Line2D(old_point, new_point)
self._target_pos = new_point
# check if the new segment in the target trajectory
# intersects any walls
hit = False
for wall in walls:
if line.intersects(wall):
                        # oops, we bumped into a wall, retry :)
hit = True
break
if not hit:
self._cur_node = next
self._traj.append(Line2D(old_point, self._target_pos))
loop = False
break
class EightTarget(GraphTargetBase):
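    """A target that follows a fixed, eight-shaped loop of waypoints in order."""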
def __init__(self, targetid=None):
GraphTargetBase.__init__(self, targetid)
# build the eight shaped transition graph
self._graph.add_node(1)
self._graph.position[1] = (5.75, 4.)
self._graph.add_node(2)
self._graph.position[2] = (4.5, 4.75)
self._graph.add_node(3)
self._graph.position[3] = (3., 4.95)
self._graph.add_node(4)
self._graph.position[4] = (1.5, 4.75)
self._graph.add_node(5)
self._graph.position[5] = (1.5, 3)
self._graph.add_node(6)
self._graph.position[6] = (1.5, 1.5)
self._graph.add_node(7)
self._graph.position[7] = (2.5, 1)
self._graph.add_node(8)
self._graph.position[8] = (3.5, 1.25)
self._graph.add_node(9)
self._graph.position[9] = (3.75, 2)
self._graph.add_node(10)
self._graph.position[10] = (5.75, 2)
self._graph.add_node(11)
self._graph.position[11] = self._graph.position[1]
self._graph.add_node(12)
self._graph.position[12] = (6, 5.25)
self._graph.add_node(13)
self._graph.position[13] = (7.5, 5.25)
self._graph.add_node(14)
self._graph.position[14] = (7.75, 3)
self._graph.add_node(15)
self._graph.position[15] = (8, 1.5)
self._graph.add_node(16)
self._graph.position[16] = (7, 1.25)
self._graph.add_node(17)
self._graph.position[17] = (6, 1.25)
self._graph.add_node(18)
self._graph.position[18] = self._graph.position[10]
self._graph.add_edge(1,2)
self._graph.add_edge(2,3)
self._graph.add_edge(3,4)
self._graph.add_edge(4,5)
self._graph.add_edge(5,6)
self._graph.add_edge(6,7)
self._graph.add_edge(7,8)
self._graph.add_edge(8,9)
self._graph.add_edge(9,10)
self._graph.add_edge(10,11)
self._graph.add_edge(11,12)
self._graph.add_edge(12,13)
self._graph.add_edge(13,14)
self._graph.add_edge(14,15)
self._graph.add_edge(15,16)
self._graph.add_edge(16,17)
self._graph.add_edge(17,18)
self._graph.add_edge(18,1)
self._cur_node = 10
self._pos = Point2D(*self._graph.position[self._cur_node])
def plan(self, walls):
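        """Advance to the next node along the eight-shaped loop, adding noise
        around it while avoiding wall crossings."""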
loop = True
old_point = self._pos
neighbors = self._graph[self._cur_node].keys()
while loop:
# select the next node
if self._cur_node + 1 in neighbors:
next = self._cur_node + 1
else:
assert 1 in neighbors
next = 1
xc, yc = self._graph.position[next]
            # 1, 10, 11 and 18 are the nodes at the entry/exit of the passage
# we use a smaller variance to avoid bumping into the lateral walls
# of the passage
sigma2 = 0.175
if next in [1,10,11,18]:
sigma2 = 0.1
for i in xrange(0,10):
dx, dy = numpy.random.normal(0, sigma2, 2)
newx = numpy.clip(xc + dx, 0, 9)
newy = numpy.clip(yc + dy, 0, 6)
new_point = Point2D(newx, newy)
line = Line2D(old_point, new_point)
self._target_pos = new_point
# check if the new segment in the target trajectory
# intersects any walls
hit = False
for wall in walls:
if line.intersects(wall):
                        # oops, we bumped into a wall, retry :)
hit = True
break
if not hit:
self._cur_node = next
self._traj.append(Line2D(old_point, self._target_pos))
loop = False
break
print "Target traj. planning, discarding segment:", Line2D(old_point, self._target_pos)
print " cur_node, next_node:", self._cur_node, next
# -*- coding: utf-8 -*-
"""Wrapper for running Oncotator
"""
from snakemake.shell import shell
__author__ = "Manuel Holtgrewe"
__email__ = "manuel.holtgrewe@bihealth.de"
shell(
r"""
# -----------------------------------------------------------------------------
# Redirect stderr to log file by default and enable printing executed commands
exec 2> >(tee -a "{snakemake.log}")
set -x
# -----------------------------------------------------------------------------
module purge
module load HTSlib/1.2.1-foss-2015a
module load BCFtools/1.2-foss-2015a
module load Oncotator/v1.8.0.0-foss-2015a-Python-2.7.9
# Shortcut to corpus directory (line length limit...)
corpus={snakemake.config[step_config][somatic_variant_annotation][oncotator][path_corpus]}
# Save original sample names
bcftools view -h {snakemake.input.vcf} | tail -n 1 | cut -f 10- | tr '\t' '\n' \
>{snakemake.output.samples}
# Prepare input VCF file for Oncotator ------------------------------------------------
# Create new samples file with TUMOR/NORMAL
echo -e "TUMOR\nNORMAL" > {snakemake.output.fake_samples}
# Create transmogrified VCF file for the input of Oncotator
bcftools filter \
-r "1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,X,Y" \
{snakemake.input.vcf} \
| bcftools reheader --samples {snakemake.output.fake_samples} \
> {snakemake.output.vcf_onco_in}
# Call Oncotator with VCF output ------------------------------------------------------
# Perform Oncotator annotation (using fake sample names)
oncotator -v -i VCF -o VCF \
--db-dir $corpus \
-c $corpus/override_lists/tx_exact_uniprot_matches.AKT1_CRLF2_FGFR1.txt \
--log_name $(dirname {snakemake.log})/oncotator.vcf.log \
{snakemake.output.vcf_onco_in} \
{snakemake.output.tmp_vcf} \
{snakemake.params.genome}
# Add back the real sample names
bcftools reheader --samples {snakemake.output.samples} {snakemake.output.tmp_vcf} \
| bgzip -c \
>{snakemake.output.vcf}
tabix {snakemake.output.vcf}
# Compute MD5 sums
pushd $(dirname {snakemake.output.vcf}) && \
md5sum $(basename {snakemake.output.vcf}) >$(basename {snakemake.output.vcf_md5}) && \
md5sum $(basename {snakemake.output.tbi}) >$(basename {snakemake.output.tbi_md5}) && \
popd
# Call Oncotator with MAF output ------------------------------------------------------
# Perform Oncotator annotation (using fake sample names)
oncotator -v -i VCF -o TCGAMAF \
--db-dir $corpus \
-c $corpus/override_lists/tx_exact_uniprot_matches.AKT1_CRLF2_FGFR1.txt \
--log_name $(dirname {snakemake.log})/oncotator.vcf.log \
{snakemake.output.vcf_onco_in} \
{snakemake.output.tmp_maf} \
{snakemake.params.genome}
bgzip -c {snakemake.output.tmp_maf} >{snakemake.output.maf}
# Compute MD5 sums
pushd $(dirname {snakemake.output.vcf}) && \
md5sum $(basename {snakemake.output.maf}) >$(basename {snakemake.output.maf_md5}) && \
popd
"""
)
#!/usr/bin/env python
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to resolve the current platform and bitness in a way that works
across infrastructure systems.
"""
import itertools
import platform
import sys
def get():
"""Returns the normalized platform and bitness values.
Platform: linux, mac, win
Machine:
- x86_64 (Intel 64-bit)
- x86 (Intel 32-bit)
- armv6l (ARM 32-bit v6)
- arm64 (ARM 64-bit)
- <other> (Unknown, returned by platform.machine())
Bits: 32, 64
Returns:
plat (str): The name of the current platform.
machine (str): The normalized machine type.
bits (int): The bitness of the current platform, one of 32, 64.
Raises:
ValueError if both the platform and bitness could not be resolved.
"""
plat = sys.platform
machine = platform.machine().lower()
arch = platform.architecture()[0]
if plat.startswith('linux'):
plat = 'linux'
elif plat.startswith(('win', 'cygwin')):
plat = 'win'
elif plat.startswith(('darwin', 'mac')):
plat = 'mac'
else: # pragma: no cover
raise ValueError("Don't understand platform [%s]" % (plat,))
# Normalize "machine".
if machine.startswith('arm'):
if machine.startswith('arm64'):
machine = 'arm64'
elif machine.endswith('l'):
# 32-bit ARM: Standardize on ARM v6 baseline.
machine = 'armv6l'
elif machine in ('amd64',):
machine = 'x86_64'
elif machine in ('i386', 'i686'):
machine = 'x86'
# Extract architecture.
if arch == '64bit':
bits = 64
elif arch == '32bit':
bits = 32
else: # pragma: no cover
raise ValueError("Don't understand architecture [%s]" % (arch,))
return plat, machine, bits
def exe_suffix():
"""Returns either '' or '.exe' depending on the platform."""
plat, _, _ = get()
return '.exe' if plat == 'win' else ''
def cipd_os():
"""Returns the equivalent of `cipd ensure`'s ${os}.
Example: 'windows', 'mac', 'linux'
    """
os_name, _, _ = get()
return _cipd_os(os_name)
def _cipd_os(os_name):
return os_name.replace('win', 'windows')
def cipd_arch():
"""Returns the equivalent of `cipd ensure`'s ${arch}.
Example: 'amd64', '386'
"""
_, machine, _ = get()
return _cipd_arch(machine)
def _cipd_arch(machine):
return {
'x86': '386',
'x86_64': 'amd64',
}.get(machine, machine)
def cipd_platform():
"""Return the equivalent of `cipd ensure`'s ${platform}."""
os_name, machine, _ = get()
return "%s-%s" % (_cipd_os(os_name), _cipd_arch(machine))
def cipd_all_targets():
"""Returns an iterable of (platform, arch) tuples for all supported buildslave
platforms that we expect CIPD packages to exist for.
This is used for CIPD presubmit validation.
"""
return (
('linux', '386'),
('linux', 'amd64'),
('linux', 'arm64'),
('linux', 'armv6l'),
('linux', 'mips64'),
('mac', 'amd64'),
('windows', '386'),
('windows', 'amd64'),
)
def cascade_config(config, plat=None):
"""Returns (dict): The constructed configuration dictionary.
Traverses the supplied configuration dictionary, building a cascading
configuration by folding in values of increasingly-specialized platform tuple
keys. The platform tuple that is traversed is the one returned by 'get'.
For example, on a 64-bit Linux platform with a 'config' dictionary of:
config = {
(): {
'foo': 'foo-generic',
'bar': 'bar-generic',
'baz': 'baz-generic',
},
('linux',): {
'bar': 'bar-linux',
'baz': 'baz-linux',
},
('linux', 64): {
'qux': 'qux-linux-64bit-generic',
},
('linux', 'x86_64'): {
'baz': 'baz-linux-amd64',
},
}
The resulting dictionary would be:
{
'foo': 'foo-generic',
'bar': 'bar-linux',
'baz': 'baz-linux-amd64',
'qux': 'qux-linux-64bit-generic',
}
Args:
config (dict): Dictionary keyed on platform tuples.
"""
# Cascade the platform configuration.
plat = plat or get()
result = {}
for r in xrange(len(plat)+1):
for c in itertools.combinations(plat, r):
result.update(config.get(c, {}))
return result
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Module: plot_model
-------------------
Contains the main driver function and some helper functions.
F. G. Ramon-Fox 2021
Last revision: May 2021
"""
import numpy as np
import iofunctions as io
import visualization as vis
from units import Units
from galrotcurve import GalaxyRotationCurve
def main():
"""
This is the main driver function of the code.
The user specifies all the relevant parameters in this function
    (see below).
"""
# Galaxy parameters
Md = 9.0e9 # disc mass (solar masses)
Rg = 2.5 # gas disc scale radius (kpc)
Rd = 2.5 # stellar disc scale radius (kpc)
gfrac = 0.15 # gas fraction
sfrac = 0.85 # stellar fraction
# Bulge
Mb = 3.0e8 # bulge mass (solar masses)
rb = 0.4 # bulge scale radius (kpc)
# Halo parameters
Mh = 5.7e11 # halo mass (solar masses)
c = 4.0 # concentration
rs = 33.8 # halo scale radius (kpc)
# Unit parameters.
unit_mass = 1.0e5 # Solar Masses
unit_length = 0.1 # kpc
unit_velocity = 2.074756 # km/s
unit_time = 46.96926 # Myr
# Image format
img_format = "pdf"
# Include observations data
include_data = True
path = "M33_data.txt"
# Radial coordinate parameters
Rmin = 0.01 # kpc
Rmax = 15.0 # kpc
dR = 0.01 # kpc
# Initialize units container, which validates consistency with G = 1.0
units = Units(unit_mass, unit_length, unit_velocity, unit_time)
io.print_units(units)
# Create galaxy model object.
rcmodel = build_galaxy_model(Md, Rg, Rd, gfrac, sfrac, \
Mb, rb, Mh, c, rs, units)
io.print_galaxy_parameters(rcmodel)
# plot rotation curve model
plot_rotation_curve_model(rcmodel, units, Rmin, Rmax, dR=dR, \
plot_name="model_curve", fmt=img_format, \
include_data=include_data, \
data_path=path)
def build_galaxy_model(Md, Rg, Rd, gfrac, sfrac, Mb, rb, Mh, c, rs, units):
"""
Generates a GalaxyRotationCurve based on the input physical parameters of
    the model. These parameters are rescaled by the unit system specified in
the units object.
Parameters
----------
Md : float or int
Disc mass in solar masses.
Rg : float or int
Gas disc scale radius in kpc.
Rd : float or int
Stellar disc scale radius in kpc.
gfrac : float or int
Gas fraction (0 to 1.).
sfrac : float or int
Stellar fraction (0 to 1.).
Mb : float or int
Bulge mass in solar masses.
rb : float or int
Bulge scale radius in kpc.
Mh : float or int
Dark halo mass in solar masses.
c : float or int
Halo concentration parameter.
rs : float or int
Halo scale radius in kpc.
units : object
Container with the unit system satisfying G=1.
Returns
-------
rcmodel : object
A GalaxyRotationCurve object representing the model.
Usage
-----
rcmod = \
build_galaxy_model(Md, Rg, Rd, gfrac, sfrac, Mb, rb, Mh, c, rs, units)
"""
    # NOTE: the parameters will be validated at the instantiation of
    # GalaxyRotationCurve.
# Create rotationcurve object
# Disc & Gas Parameters
Md = Md/units.unit_mass
Rg = Rg/units.unit_length
Rd = Rd/units.unit_length
# Bulge
Mb = Mb/units.unit_mass
rb = rb/units.unit_length
# Halo parameters
Mh = Mh/units.unit_mass
rs = rs/units.unit_length
rcmodel = GalaxyRotationCurve(Md, Rg, Rd, gfrac, sfrac, Mb, rb, Mh, c, rs)
return rcmodel
def plot_rotation_curve_model(rcmod, units, Rmin, Rmax, dR=0.01, \
plot_name="rotation_curve", fmt="png", \
include_data=False, data_path=None):
"""
Plots the rotation curve of the model represented by rcmod. It generates
individual curves of the halo, gas disc, stellar disc, and bulge, as well
as the global model. All these results are plotted on the same figure.
Data points from an observed curve may be optionally included.
Parameters
----------
rcmod : object
a GalaxyRotationCurve object representing the model.
units : object
a Units object, must be the same one used to build rcmod.
Rmin : float
minimum radial position to plot.
Rmax : float
maximum radial position to plot.
dR : float (optional)
separation between radial positions in plot (default: 0.01 in kpc)
plot_name : str (optional)
base name of the figure output file, do not include the extension.
(default: "rotation_curve")
fmt : str (optional)
format of the image (e.g. png, pdf, eps)
include_data : bool (optional)
if True, reads observed rotation curve data points from data_path.
data_path : str (optional, necessary if include_data=True)
filename or path+filename of the observed rotation curve
Example
-------
default usage:
plot_rotation_curve_model(rcmod, units, Rmin, Rmax)
add an observed rotation curve:
        plot_rotation_curve_model(rcmod, units, Rmin, Rmax, \
include_data=True, data_path="./folder1/folder2/curve.txt")
Notes
-----
    data_path must point to a two-column file with the first column containing
    the radial position in kpc and the second column the rotation curve in km/s.
See load_rot_curve_data in iofunctions for details.
"""
if not isinstance(rcmod, GalaxyRotationCurve):
raise TypeError("rcmod must be an instance of GalaxyRotationCurve.")
if not isinstance(units, Units):
raise TypeError("units must be an instance of Units.")
if not isinstance(Rmin, float) and not isinstance(Rmin, int):
raise TypeError("Rmin must be a number.")
if not isinstance(Rmax, float) and not isinstance(Rmax, int):
raise TypeError("Rmax must be a number.")
if not isinstance(dR, float):
raise TypeError("dR must be a float.")
if not isinstance(plot_name, str):
raise TypeError("plot_name must be a string.")
if not isinstance(fmt, str):
raise TypeError("fmt must be a string.")
if include_data and data_path is None:
        raise TypeError("a data_path must be provided when include_data is True.")
if include_data and not isinstance(data_path, str):
raise TypeError("data_path must be a string.")
# Generate radial position array
R = np.arange(Rmin, Rmax, dR)
Rcode = R/units.unit_length
# Generate individual curves to visualize the contribution of the
# galaxy's components.
pltdat = vis.PlotData() # Plot data container.
vr_halo = rcmod.get_halo_rotation_curve(Rcode) * units.unit_velocity
pltdat.add_plot_data(R, vr_halo, label="halo")
vr_gas = rcmod.get_disc_rotation_curve_gas(Rcode) * units.unit_velocity
pltdat.add_plot_data(R, vr_gas, label="gas disc")
vr_stars = rcmod.get_disc_rotation_curve_stars(Rcode) * units.unit_velocity
pltdat.add_plot_data(R, vr_stars, label="stellar disc")
vr_bulge = rcmod.get_bulge_rotation_curve(Rcode) * units.unit_velocity
pltdat.add_plot_data(R, vr_bulge, label="bulge")
# Get full rotation curve.
vrot_model = rcmod.get_full_rotation_curve(Rcode) * units.unit_velocity
pltdat.add_plot_data(R, vrot_model, label="Global", color="black")
# Load data from observations.
if include_data:
Rdata, vdata = io.load_rot_curve_data(data_path)
pltdat.add_plot_data(Rdata, vdata, \
label="observations", ls="none", \
marker="o", color="blue")
# Set plot limits and font sizes
pltdat.Rpos_lim = [0., Rmax]
pltdat.vrot_lim = [0., 130.]
pltdat.fontsize = 20
pltdat.legendeize = 20
# Plot the composite rotation curve.
    vis.plot_composite_rotation_curve(pltdat, plot_name, fmt=fmt)
# Plot the simple rotation curve.
vis.plot_vrot_vs_radius(R, vrot_model, "global_model", label="Global")
###########
if __name__ == "__main__":
main()
def sieve_of_atkin(limit: int) -> None:
    """
    Print all primes up to `limit` using the Sieve of Atkin.
    2 and 3 are known to be prime and are printed directly.
    """
    if limit > 2:
        print(2, end=" ")
    if limit > 3:
        print(3, end=" ")
    # Initialise the sieve array with False values
    sieve: list[bool] = [False] * (limit + 1)
"""
Mark sieve[n] is True if one of the following is True:
a) n = (4 * x * x) + (y * y) has odd number of solutions, i.e.,
there exist odd number of distinct pairs (x, y) that satisfy
the equation and n % 12 = 1 or n % 12 = 5.
b) n = (3 * x * x) + (y * y) has odd number of solutions and n % 12 = 7
c) n = (3 * x * x) - (y * y) has odd number of solutions,
x > y and n % 12 = 11
"""
x: int = 1
while x * x <= limit:
y = 1
while y * y <= limit:
# Main part of Sieve of Atkin.
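            # ^= toggles the entry, so sieve[n] ends up True only when the
            # corresponding equation has an odd number of (x, y) solutions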
n = (4 * x * x) + (y * y)
if (n <= limit and (n % 12 == 1 or n % 12 == 5)):
sieve[n] ^= True
n = (3 * x * x) + (y * y)
if n <= limit and n % 12 == 7:
sieve[n] ^= True
n = (3 * x * x) - (y * y)
if (x > y and n <= limit and n % 12 == 11):
sieve[n] ^= True
y += 1
x += 1
# Mark all multiples of squares as non-prime
r = 5
while r * r <= limit:
if sieve[r]:
for i in range(r * r, limit + 1, r * r):
sieve[i] = False
r += 1
# Print primes using sieve[]
for a in range(5, limit + 1):
if sieve[a]:
print(a, end=" ")
if __name__ == "__main__":
sieve_of_atkin(int(input("Enter the limit for sieve: ")))
import cv2 as cv
import numpy as np
import glob
from tqdm import tqdm
import matplotlib.pyplot as plt
from math import degrees as dg
def cv_show(img,name='Figure'):
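    """Show an image in a window and block until any key is pressed."""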
cv.namedWindow(name,cv.WINDOW_AUTOSIZE)
cv.imshow(name,img)
cv.waitKey(0)
cv.destroyAllWindows()
Path1 = r'F:\PyCharm\Camera_calibration_GIT\class1'
# Define the chessboard size: note these are the counts of *inner* row and
# column corners, excluding the outermost ones, otherwise detection fails
chessboard_size = (15,13)
a = np.prod(chessboard_size)
# Build a 195x3 matrix to hold the 3D coordinates (object points) of the
# 15*13 inner corners of the chessboard
objp = np.zeros((np.prod(chessboard_size), 3), dtype=np.float32)
# Use np.mgrid to generate the x-y coordinates; each chessboard square is 18mm
# The resulting objp (with z=0) is (0,0,0), (18,0,0), (36,0,0), ...
objp[:, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2) * 18
# print("object is %f", objp)
# Define arrays to store the detected points
obj_points = []  # 3D points in the world coordinate system
img_points = []  # 2D points in the image coordinate system
# Set the termination criteria: 30 iterations or a change < 0.001
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Read all images in the directory
calibration_paths = glob.glob(Path1 + r'\*.jpg')
# Use tqdm to display a progress bar
for image_path in tqdm(calibration_paths):
    # Read the image
    img = cv.imread(image_path)
    # x,y = img.shape[:2]
    # ratio = y/x
    # img = cv.resize(img, (int(750*ratio),750))
    # Convert the image to grayscale
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # cv_show(gray)
    # Find the positions of the inner chessboard corners
    ret, corners = cv.findChessboardCorners(gray, chessboard_size, None)
    if ret:
        obj_points.append(objp)
        # Sub-pixel corner detection to refine the corner locations
        corners2 = cv.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)
        img_points.append(corners2)
        # Draw the corners on the image to inspect the result
        img = cv.drawChessboardCorners(img, chessboard_size, corners2, ret)
        # img = cv.resize(img, (400,600))
        cv_show(img)
print("Finished processing all calibration images")
# Camera calibration
# Note: calibrateCamera expects the image size as (width, height), i.e. the
# reverse of gray.shape, which is (rows, cols)
ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(obj_points, img_points, gray.shape[::-1], None, None)
# Here fx = f/dX and fy = f/dY are the normalized focal lengths along the
# x and y axes, respectively.
# u0 and v0 are the optical centre, i.e. the intersection of the camera's optical
# axis with the image plane; it usually lies at the image centre, so its value is
# often taken as half the resolution.
# Display and save the parameters
print("####### Camera intrinsic matrix #######")
print(mtx)
print("####### Distortion coefficients #######")
print(dist)
print("####### Rotation vectors #######")
print(rvecs)
print("####### Translation vectors #######")
print(tvecs)
np.savez(Path1 + r'\class_mtx.npz', mtx=mtx, dist=dist, rvecs=rvecs, tvecs=tvecs)  # name the arrays mtx, dist, rvecs, tvecs
# mtx_mat = np.mat(mtx)
# mtx_mat_T = mtx_mat.I
# # Define a point in the pixel coordinate system
# point1_uv = np.mat([20,30,1])
# point1_xy = np.dot(mtx_mat_T,point1_uv.T)
# print(point1_xy)
# --------------------------------------------------------
# Use one image to inspect the effect of undistortion
img2 = cv.imread(Path1+r'\028.jpg')
# img2 = cv.resize(img2, (int(750 * ratio), 750))
cv_show(img2)
print("original image array shape", img2.shape)
# img2.shape[:2] gives the image height and width
h, w = img2.shape[:2]
print("pic's height, width: %f, %f" % (h, w))
# img2.shape[:3] gives the image height, width and channels
# h, w, n = img2.shape[:3]
# print("PIC shape", (h, w, n))
newCameraMtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))  # free scaling parameter
dst = cv.undistort(img2, mtx, dist, None, newCameraMtx)
# Crop the image using the ROI computed above
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
cv_show(dst)
cv.imwrite(r'F:\PyCharm\Camera_calibration_GIT\Camera calibration\Calibresult5.jpg', dst)
# --------------------------------------------------------
# Compute the mean reprojection error over all images
total_error = 0
for i in range(len(obj_points)):
img_points2, _ = cv.projectPoints(obj_points[i], rvecs[i], tvecs[i], mtx, dist)
error = cv.norm(img_points[i], img_points2, cv.NORM_L2)/len(img_points2)
total_error += error
print("total error: {}".format(total_error/len(obj_points)))
# --------------------------------------------------------
# Load the camera's intrinsic and extrinsic calibration matrices
with np.load(Path1+r'\class_mtx.npz') as X:
    mtx, dist, _, _ = [X[i] for i in ('mtx', 'dist', 'rvecs', 'tvecs')]
    print("####### Loaded camera intrinsics and distortion coefficients #######")
print(mtx, dist)
# --------------------------------------------------------
# # Define the chessboard size
chessboard_size = (15,13)
# Object position matrix in the world coordinate system (Z=0)
objp = np.zeros((np.prod(chessboard_size), 3), dtype=np.float32)
objp[:, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2) * 18
# Pixel coordinates
test_img = cv.imread(Path1+r"\026.jpg")
gray = cv.cvtColor(test_img, cv.COLOR_BGR2GRAY)
# cv_show(test_img)
# Find the corner coordinates in the image plane
ret, corners = cv.findChessboardCorners(gray, chessboard_size, None)
if ret:
    _, R, T, _ = cv.solvePnPRansac(objp, corners, mtx, dist)
    print("Rotation vector", R)
    print("Translation vector", T)
    sita_x = dg(R[0][0])
    sita_y = dg(R[1][0])
    sita_z = dg(R[2][0])
    print("sita_x is ", sita_x, 'degrees')
    print("sita_y is ", sita_y, 'degrees')
    print("sita_z is ", sita_z, 'degrees')
# --------------------------------------------------------
# --------------------------------------------------------
# --------------------------------------------------------
# # Load the camera calibration data
# with np.load(r'F:\PyCharm\Camera calibration\class3\class3.npz') as X:
# mtx, dist, _, _ = [X[i] for i in ('mtx', 'dist', 'rvecs', 'tvecs')]
def draw(img, corners, imgpts):
    """
    Draw the 3D coordinate axes on the image.
    :param img: original image data
    :param corners: corner coordinates in the image plane
    :param imgpts: coordinates of the 3D points projected onto the 2D image plane
    :return: the image with the axes drawn
    """
    # corners[0] is the origin of the image coordinate system; imgpts[0]-imgpts[2]
    # are the 3D axis end points projected onto the 2D image
    corner = tuple(corners[0].ravel())
    # Draw one line along each of the three axes
cv.line(img, corner, tuple(imgpts[0].ravel()), (255, 0, 0), 2)
cv.line(img, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 2)
cv.line(img, corner, tuple(imgpts[2].ravel()), (0, 0, 255), 2)
return img
# # Define the chessboard size
# chessboard_size = (15,13)
# Initialise the 3D points of the target coordinate system
objp = np.zeros((np.prod(chessboard_size),3),dtype=np.float32)
objp[:,:2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1,2)*18
# Initialise the 3D coordinate axes
axis = np.float32([[90, 0, 0], [0, 90, 0], [0, 0, -90]]).reshape(-1, 3)  # coordinate axes
# Load all the image data
images = glob.glob(Path1+r'\026.jpg')
for fname in images:
img = cv.imread(fname)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv_show(img)
    # Find the corner coordinates in the image plane
    ret, corners = cv.findChessboardCorners(gray, chessboard_size, None)
    if ret:
        # Solve PnP to obtain the rotation and translation vectors
        _, rvecs, tvecs, _ = cv.solvePnPRansac(objp, corners, mtx, dist)
        print("Rotation vector", rvecs)
        print("Translation vector", tvecs)
        # Project the 3D points onto the 2D image plane
        imgpts, jac = cv.projectPoints(axis, rvecs, tvecs, mtx, dist)
        # Draw the axes on the image
img = draw(img, corners, imgpts)
cv.imwrite(r"F:\PyCharm\Camera_calibration_GIT\3d_2d_project\3d_2d_project5.jpg",img)
cv_show(img)
# cv.destroyAllWindows()
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# * File:
# * engine.py
# *
# * Library:
# * ebpf_ic/
# *
# * Author:
# * Lucas Duarte (lucas.f.duarte@ufv.br)
# *
# * Description:
# * Conversion and translation methods
# *
from Instruction import *
from data import *
from lib import *
def x64_x32_inst(op, args, line):
    """
    Converts x64 and x32 type instructions.
    Args:
        op: the operation (eg. mov).
        args: a list of arguments (eg. r0, r2).
        line: the corresponding line number in the input file.
    Returns:
        instruction: the instruction converted into machine code, or None on error.
    Raises:
        None
    """
inst = Instruction()
if len(args) == 2 and op != 'neg' and op != 'neg32':
if isRegValid(args[0]) and not isRegValid(args[1]):
if isNumericDataValid(args[1]):
inst.setDst(reg_set[args[0]])
inst.setImm(completeBinary(dataTypeConversor(args[1]), 32))
inst.setOpc(x64_x32_inst_set[op]['opcodeImm'])
else:
print("ebpf_ic: line " + str(line) + ": invalid immediate")
return None
elif isRegValid(args[0]) and isRegValid(args[1]):
inst.setDst(reg_set[args[0]])
inst.setSrc(reg_set[args[1]])
inst.setOpc(x64_x32_inst_set[op]['opcode'])
else:
print("ebpf_ic: line " + str(line) + ": invalid arguments")
return None
    elif len(args) == 1 and op in ('neg', 'neg32'):
if isRegValid(args[0]):
inst.setDst(reg_set[args[0]])
else:
print("ebpf_ic: line " + str(line) + ": " + args[0] + ": unknown register")
return None
inst.setOpc(x64_x32_inst_set[op]['opcode'])
elif len(args) > 2:
print("ebpf_ic: line " + str(line) + ": too many arguments")
return None
else:
print("ebpf_ic: line " + str(line) + ": not enough arguments")
return None
return inst.toString()
def byteswap_inst(op, args, line):
    """
    Converts byteswap type instructions.
    Args:
        op: the operation.
        args: a list of arguments.
        line: the corresponding line number in the input file.
    Returns:
        instruction: the instruction converted into machine code, or None on error.
    Raises:
        None
    """
inst = Instruction()
if len(args) > 1:
print("ebpf_ic: line " + str(line) + ": too many arguments")
return None
elif len(args) < 1:
print("ebpf_ic: line " + str(line) + ": not enough arguments")
return None
else:
if isRegValid(args[0]):
inst.setDst(reg_set[args[0]])
inst.setImm(completeBinary('0' + bin(int(byteswap_inst_set[op]['imm'], 16))[2:], 32))
inst.setOpc(byteswap_inst_set[op]['opcode'])
else:
print("ebpf_ic: line " + str(line) + ": " + args[0] + ": unknown register")
return None
return inst.toString()
def memory_inst(op, args, line):
    """
    Converts memory access type instructions.
    Args:
        op: the operation.
        args: a list of arguments.
        line: the corresponding line number in the input file.
    Returns:
        instruction: the instruction converted into machine code, or None on error.
    Raises:
        None
    """
inst = Instruction()
if len(args) == 2:
if op == 'lddw':
if isRegValid(args[0]) and not isRegValid(args[1]):
if isNumericDataValid(args[1]):
inst.setDst(reg_set[args[0]])
inst.setImm(completeBinary(dataTypeConversor(args[1]), 32))
inst.setOpc(memory_inst_set[op]['opcode'])
else:
print("ebpf_ic: line " + str(line) + ": invalid immediate")
return None
else:
print("ebpf_ic: line " + str(line) + ": invalid arguments")
return None
else:
if isRegValid(args[0]) and not isRegValid(args[1]):
memoryArgs = isMemoryAccessValid(args[1])
if memoryArgs == None:
print("ebpf_ic: line " + str(line) + ": invalid memory access operation")
return None
if isRegValid(memoryArgs[0]):
if isNumericDataValid(memoryArgs[1]):
inst.setSrc(reg_set[memoryArgs[0]])
inst.setDst(reg_set[args[0]])
inst.setOff(completeBinary(dataTypeConversor(memoryArgs[1], False), 16))
inst.setOpc(memory_inst_set[op]['opcode'])
else:
print("ebpf_ic: line " + str(line) + ": invalid offset")
return None
else:
print("ebpf_ic: line " + str(line) + ": unknown register")
return None
elif not isRegValid(args[0]) and isRegValid(args[1]):
memoryArgs = isMemoryAccessValid(args[0])
if memoryArgs == None:
print("ebpf_ic: line " + str(line) + ": invalid memory access operation")
return None
if isRegValid(memoryArgs[0]):
if isNumericDataValid(memoryArgs[1]):
inst.setSrc(reg_set[args[1]])
inst.setDst(reg_set[memoryArgs[0]])
inst.setOff(completeBinary(dataTypeConversor(memoryArgs[1], False), 16))
inst.setOpc(memory_inst_set[op]['opcode'])
else:
print("ebpf_ic: line " + str(line) + ": invalid offset")
return None
else:
print("ebpf_ic: line " + str(line) + ": unknown register")
return None
elif not isRegValid(args[0]) and not isRegValid(args[1]):
memoryArgs = isMemoryAccessValid(args[0])
if memoryArgs == None:
print("ebpf_ic: line " + str(line) + ": invalid memory access operation")
return None
if isRegValid(memoryArgs[0]):
if isNumericDataValid(memoryArgs[1]):
if isNumericDataValid(args[1]):
inst.setDst(reg_set[memoryArgs[0]])
inst.setImm(completeBinary(dataTypeConversor(args[1]), 32))
inst.setOff(completeBinary(dataTypeConversor(memoryArgs[1], False), 16))
inst.setOpc(memory_inst_set[op]['opcode'])
else:
print("ebpf_ic: line " + str(line) + ": invalid immediate")
return None
else:
print("ebpf_ic: line " + str(line) + ": invalid offset")
return None
else:
print("ebpf_ic: line " + str(line) + ": unknown register")
return None
elif len(args) == 3:
if isRegValid(args[0]) and isRegValid(args[1]):
if isNumericDataValid(args[2]):
inst.setSrc(reg_set[args[0]])
inst.setDst(reg_set[args[1]])
inst.setImm(completeBinary(dataTypeConversor(args[2]), 32))
inst.setOpc(memory_inst_set[op]['opcode'])
else:
print("ebpf_ic: line " + str(line) + ": invalid immediate")
return None
else:
print("ebpf_ic: line " + str(line) + ": unknown register")
return None
elif len(args) > 3:
print("ebpf_ic: line " + str(line) + ": too many arguments")
return None
else:
print("ebpf_ic: line " + str(line) + ": not enough arguments")
return None
return inst.toString()
def branch_inst(op, args, line):
    """
    Converts branch type instructions.
    Args:
        op: the operation.
        args: a list of arguments.
        line: the corresponding line number in the input file.
    Returns:
        instruction: the instruction converted into machine code, or None on error.
    Raises:
        None
    """
inst = Instruction()
if len(args) == 3:
if isNumericDataValid(args[2]):
if isRegValid(args[0]):
if isRegValid(args[1]):
inst.setSrc(reg_set[args[1]])
inst.setOpc(branch_inst_set[op]['opcode'])
elif isNumericDataValid(args[1]):
inst.setImm(completeBinary(dataTypeConversor(args[1]), 32))
inst.setOpc(branch_inst_set[op]['opcodeImm'])
else:
print("ebpf_ic: line " + str(line) + ": invalid arguments")
return None
inst.setDst(reg_set[args[0]])
inst.setOff(completeBinary(dataTypeConversor(args[2], False), 16))
else:
print("ebpf_ic: line " + str(line) + ": unknown register")
return None
else:
print("ebpf_ic: line " + str(line) + ": invalid offset")
return None
elif len(args) == 1:
if isNumericDataValid(args[0]):
if op == 'ja':
inst.setOff(completeBinary(dataTypeConversor(args[0], False), 16))
elif op == 'call':
inst.setImm(completeBinary(dataTypeConversor(args[0]), 32))
inst.setOpc(branch_inst_set[op]['opcode'])
else:
print("ebpf_ic: line " + str(line) + ": invalid arguments")
return None
elif len(args) == 0:
if op == 'exit':
inst.setOpc(branch_inst_set[op]['opcode'])
else:
print("ebpf_ic: line " + str(line) + ": not enough arguments")
return None
elif len(args) > 3:
print("ebpf_ic: line " + str(line) + ": too many arguments")
return None
else:
if op == 'ja' or op == 'call':
print("ebpf_ic: line " + str(line) + ": too many arguments")
return None
else:
print("ebpf_ic: line " + str(line) + ": not enough arguments")
return None
return inst.toString()
# List of available instructions and their corresponding translation methods
instr_set = {
'neg' : x64_x32_inst,
'add' : x64_x32_inst,
'sub' : x64_x32_inst,
'mul' : x64_x32_inst,
'div' : x64_x32_inst,
'or' : x64_x32_inst,
'and' : x64_x32_inst,
'lsh' : x64_x32_inst,
'rsh' : x64_x32_inst,
'mod' : x64_x32_inst,
'xor' : x64_x32_inst,
'mov' : x64_x32_inst,
'arsh' : x64_x32_inst,
'neg32' : x64_x32_inst,
'add32' : x64_x32_inst,
'sub32' : x64_x32_inst,
'mul32' : x64_x32_inst,
'div32' : x64_x32_inst,
'or32' : x64_x32_inst,
'and32' : x64_x32_inst,
'lsh32' : x64_x32_inst,
'rsh32' : x64_x32_inst,
'mod32' : x64_x32_inst,
'xor32' : x64_x32_inst,
'mov32' : x64_x32_inst,
'arsh32' : x64_x32_inst,
'le16' : byteswap_inst,
'le32' : byteswap_inst,
'le64' : byteswap_inst,
'be16' : byteswap_inst,
'be32' : byteswap_inst,
'be64' : byteswap_inst,
'lddw' : memory_inst,
'ldabsw' : memory_inst,
'ldabsh' : memory_inst,
'ldabsb' : memory_inst,
'ldabsdw' : memory_inst,
'ldindw' : memory_inst,
'ldindh' : memory_inst,
'ldindb' : memory_inst,
'ldinddw' : memory_inst,
'ldxw' : memory_inst,
'ldxh' : memory_inst,
'ldxb' : memory_inst,
'ldxdw' : memory_inst,
'stw' : memory_inst,
'sth' : memory_inst,
'stb' : memory_inst,
'stdw' : memory_inst,
'stxw' : memory_inst,
'stxh' : memory_inst,
'stxb' : memory_inst,
'stxdw' : memory_inst,
'ja' : branch_inst,
'jeq' : branch_inst,
'jgt' : branch_inst,
'jge' : branch_inst,
'jlt' : branch_inst,
'jle' : branch_inst,
'jset' : branch_inst,
'jne' : branch_inst,
'jsgt' : branch_inst,
'jsge' : branch_inst,
'jslt' : branch_inst,
'jsle' : branch_inst,
'call' : branch_inst,
'exit' : branch_inst
}
import os
import shutil
import hashlib
from django.contrib.auth.models import User
from django.core import mail
from django.urls import reverse
from django.test import TestCase
from django.conf import settings
from tagging.utils import edit_string_for_tags
from djangopeople.djangopeople.models import DjangoPerson, Country
from djangopeople.machinetags.utils import tagdict
class EditViewTest(TestCase):
fixtures = ['test_data']
def setUp(self): # noqa
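        """Log in as daveb and precompute the hashed upload image file name."""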
self.client.login(username='daveb', password='123456')
with open(os.path.join(settings.OUR_ROOT, 'djangopeople/fixtures/pony.gif'), 'rb') as f:
sha1sum = hashlib.sha1(f.read()).hexdigest()
self.hashed_upload_img_file_name = os.path.join(sha1sum[:1],
sha1sum[1:2], sha1sum)
# make sure the profile upload folder exists
self.profile_img_path = os.path.join(settings.MEDIA_ROOT, 'profiles')
if not os.path.exists(self.profile_img_path):
os.makedirs(self.profile_img_path)
def tearDown(self): # noqa
# remove uploaded profile picture
if os.path.exists(self.profile_img_path):
shutil.rmtree(self.profile_img_path)
def test_edit_finding_permissions(self):
'''
        logged in user can only edit his own finding settings
'''
url = reverse('edit_finding', args=['daveb'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
url = reverse('edit_finding', args=['satchmo'])
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
response = self.client.post(url)
self.assertEqual(response.status_code, 403)
def test_edit_finding_initial_data(self):
url_edit_finding = reverse('edit_finding', args=['daveb'])
p = DjangoPerson.objects.get(user__username='daveb')
mtags = tagdict(p.machinetags.all())
response = self.client.get(url_edit_finding)
self.assertContains(response, mtags['profile']['looking_for_work'])
self.assertContains(response, mtags['im']['django'])
self.assertContains(response, p.user.email)
def test_edit_finding_email(self):
url_edit_finding = reverse('edit_finding', args=['daveb'])
url_profile = reverse('user_profile', args=['daveb'])
new_email = 'foo@bar.com'
data = {'email': new_email,
'first_name': 'Test',
'last_name': 'User',
'privacy_search': 'public',
'privacy_email': 'private',
'privacy_im': 'private',
'privacy_irctrack': 'public'}
u = User.objects.get(username='daveb')
self.assertNotEqual(u.first_name, 'Test')
self.assertNotEqual(u.last_name, 'User')
response = self.client.post(url_edit_finding, data, follow=True)
self.assertRedirects(response, url_profile)
self.assertContains(response, new_email)
u = User.objects.get(username='daveb')
self.assertEqual(u.email, new_email)
self.assertEqual(u.first_name, 'Test')
self.assertEqual(u.last_name, 'User')
def test_edit_finding_looking_for_work(self):
url_edit_finding = reverse('edit_finding', args=['daveb'])
url_profile = reverse('user_profile', args=['daveb'])
new_email = 'foo@bar.com'
looking_for_work = 'freelance'
data = {'looking_for_work': looking_for_work,
'email': new_email,
'first_name': 'Hello',
'last_name': 'World',
'privacy_search': 'public',
'privacy_email': 'private',
'privacy_im': 'private',
'privacy_irctrack': 'public'}
p = DjangoPerson.objects.get(user__username='daveb')
mtags = tagdict(p.machinetags.all())
self.assertEqual(mtags['profile']['looking_for_work'], 'full-time')
response = self.client.post(url_edit_finding, data, follow=True)
self.assertRedirects(response, url_profile)
p = DjangoPerson.objects.get(user__username='daveb')
mtags = tagdict(p.machinetags.all())
self.assertEqual(mtags['profile']['looking_for_work'], 'freelance')
# check initial value
response = self.client.get(url_edit_finding)
self.assertContains(response, looking_for_work)
def test_edit_finding_im(self):
url_edit_finding = reverse('edit_finding', args=['daveb'])
url_profile = reverse('user_profile', args=['daveb'])
new_email = 'foo@bar.com'
im_jabber = 'daveb@jabber.org'
data = {'im_jabber': im_jabber,
'email': new_email,
'first_name': 'Hello',
'last_name': 'World',
'privacy_search': 'public',
'privacy_email': 'private',
'privacy_im': 'private',
'privacy_irctrack': 'public'}
p = DjangoPerson.objects.get(user__username='daveb')
mtags = tagdict(p.machinetags.all())
self.assertEqual(mtags['im']['jabber'], '')
response = self.client.post(url_edit_finding, data, follow=True)
self.assertRedirects(response, url_profile)
p = DjangoPerson.objects.get(user__username='daveb')
mtags = tagdict(p.machinetags.all())
self.assertEqual(mtags['im']['jabber'], im_jabber)
# check initial value
response = self.client.get(url_edit_finding)
self.assertContains(response, im_jabber)
def test_edit_finding_services(self):
url_edit_finding = reverse('edit_finding', args=['daveb'])
url_profile = reverse('user_profile', args=['daveb'])
service_twitter = 'https://twitter.com/davebbar'
data = {'service_twitter': service_twitter,
'email': 'foo@bar.com',
'first_name': 'Hello',
'last_name': 'World',
'privacy_search': 'public',
'privacy_email': 'private',
'privacy_im': 'private',
'privacy_irctrack': 'public'}
p = DjangoPerson.objects.get(user__username='daveb')
mtags = tagdict(p.machinetags.all())
self.assertEqual(mtags['services']['twitter'], '')
response = self.client.post(url_edit_finding, data, follow=True)
self.assertRedirects(response, url_profile)
p = DjangoPerson.objects.get(user__username='daveb')
mtags = tagdict(p.machinetags.all())
self.assertEqual(mtags['services']['twitter'], service_twitter)
# check initial value
response = self.client.get(url_edit_finding)
self.assertContains(response, service_twitter)
def test_edit_finding_form_error_email_validation(self):
url_edit_finding = reverse('edit_finding', args=['daveb'])
u = User.objects.get(username='daveb')
old_email = u.email
other_user = User.objects.get(username='satchmo')
# set new email for daveb to existing email of user satchmo
data = {'email': other_user.email,
'privacy_search': 'public',
'privacy_email': 'private',
'privacy_im': 'private',
'privacy_irctrack': 'public'}
u = User.objects.get(username='daveb')
self.assertEqual(u.email, old_email)
response = self.client.post(url_edit_finding, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'email',
'That e-mail is already in use')
u = User.objects.get(username='daveb')
self.assertEqual(u.email, old_email)
def test_edit_finding_form_error_fields_required(self):
url_edit_finding = reverse('edit_finding', args=['daveb'])
url_profile = reverse('user_profile', args=['daveb'])
data = {'email': 'foo@bar.com',
'first_name': 'Hello',
'last_name': 'World',
'privacy_search': 'public',
'privacy_email': 'private',
'privacy_im': 'private',
'privacy_irctrack': 'public'}
response = self.client.post(url_edit_finding, data, follow=True)
self.assertRedirects(response, url_profile)
data.pop('email')
response = self.client.post(url_edit_finding, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'email',
'This field is required.')
data.pop('privacy_search')
response = self.client.post(url_edit_finding, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'email',
'This field is required.')
self.assertFormError(response, 'form', 'privacy_search',
'This field is required.')
data.pop('privacy_email')
response = self.client.post(url_edit_finding, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'email',
'This field is required.')
self.assertFormError(response, 'form', 'privacy_search',
'This field is required.')
self.assertFormError(response, 'form', 'privacy_email',
'This field is required.')
data.pop('privacy_im')
response = self.client.post(url_edit_finding, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'email',
'This field is required.')
self.assertFormError(response, 'form', 'privacy_search',
'This field is required.')
self.assertFormError(response, 'form', 'privacy_email',
'This field is required.')
self.assertFormError(response, 'form', 'privacy_im',
'This field is required.')
data.pop('privacy_irctrack')
response = self.client.post(url_edit_finding, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'email',
'This field is required.')
self.assertFormError(response, 'form', 'privacy_search',
'This field is required.')
self.assertFormError(response, 'form', 'privacy_email',
'This field is required.')
self.assertFormError(response, 'form', 'privacy_irctrack',
'This field is required.')
def test_edit_skill_permission(self):
'''
logged in user can only edit his own skills
'''
url_edit_skills = reverse('edit_skills', args=['daveb'])
response = self.client.get(url_edit_skills)
self.assertEqual(response.status_code, 200)
response = self.client.post(url_edit_skills)
self.assertEqual(response.status_code, 302)
url_edit_skills = reverse('edit_skills', args=['satchmo'])
response = self.client.get(url_edit_skills)
self.assertEqual(response.status_code, 403)
response = self.client.post(url_edit_skills)
self.assertEqual(response.status_code, 403)
def test_add_skills(self):
'''
test adding skills
'''
url_edit_skills = reverse('edit_skills', args=['daveb'])
p = DjangoPerson.objects.get(user__username='daveb')
self.assertEqual(len(p.skilltags), 3)
self.assertTrue('jazz' in edit_string_for_tags(p.skilltags))
self.assertTrue('linux' in edit_string_for_tags(p.skilltags))
self.assertTrue('python' in edit_string_for_tags(p.skilltags))
skills = '%s django' % (edit_string_for_tags(p.skilltags))
self.client.post(url_edit_skills, {'skills': skills})
p = DjangoPerson.objects.get(user__username='daveb')
self.assertEqual(len(p.skilltags), 4)
self.assertTrue('jazz' in edit_string_for_tags(p.skilltags))
self.assertTrue('linux' in edit_string_for_tags(p.skilltags))
self.assertTrue('python' in edit_string_for_tags(p.skilltags))
self.assertTrue('django' in edit_string_for_tags(p.skilltags))
def test_delete_skill(self):
'''
test deleting skills
'''
url_edit_skills = reverse('edit_skills', args=['daveb'])
p = DjangoPerson.objects.get(user__username='daveb')
self.assertEqual(len(p.skilltags), 3)
self.assertTrue('jazz' in edit_string_for_tags(p.skilltags))
self.assertTrue('linux' in edit_string_for_tags(p.skilltags))
self.assertTrue('python' in edit_string_for_tags(p.skilltags))
# delete jazz skill
skills = 'linux python'
self.client.post(url_edit_skills, {'skills': skills})
p = DjangoPerson.objects.get(user__username='daveb')
self.assertEqual(len(p.skilltags), 2)
self.assertTrue('linux' in edit_string_for_tags(p.skilltags))
self.assertTrue('python' in edit_string_for_tags(p.skilltags))
self.assertFalse('jazz' in edit_string_for_tags(p.skilltags))
# delete all skills
self.client.post(url_edit_skills, {'skills': ''})
p = DjangoPerson.objects.get(user__username='daveb')
self.assertEqual(len(p.skilltags), 0)
self.assertEqual(edit_string_for_tags(p.skilltags), '')
def test_edit_account_permission(self):
'''
logged in user can only edit his own account
'''
url_edit_account = reverse('edit_account', args=['daveb'])
response = self.client.get(url_edit_account)
self.assertEqual(response.status_code, 200)
url_edit_account = reverse('edit_account', args=['satchmo'])
response = self.client.get(url_edit_account)
self.assertEqual(response.status_code, 403)
def test_edit_account(self):
'''
add and change openid
'''
url_profile = reverse('user_profile', args=['daveb'])
url_edit_account = reverse('edit_account', args=['daveb'])
p = DjangoPerson.objects.get(user__username='daveb')
self.assertEqual(p.openid_server, '')
self.assertEqual(p.openid_delegate, '')
response = self.client.post(url_edit_account,
{'openid_server': 'http://example.com',
'openid_delegate': 'http://google.com'})
self.assertRedirects(response, url_profile)
p = DjangoPerson.objects.get(user__username='daveb')
self.assertEqual(p.openid_server, 'http://example.com')
self.assertEqual(p.openid_delegate, 'http://google.com')
# test display openid change form (with initial data)
response = self.client.get(url_edit_account)
self.assertHTMLEqual(
response.content.decode('utf8').split(
'<label for="id_openid_server">OpenID server:</label>'
)[1].split('</div>')[0],
(
'<input id="id_openid_server" type="url" '
'name="openid_server" value="http://example.com" '
'maxlength="255" />')
)
self.assertHTMLEqual(
response.content.decode('utf8').split(
'<label for="id_openid_delegate">OpenID delegate:</label>'
)[1].split('</div>')[0],
(
'<input id="id_openid_delegate" '
'type="url" name="openid_delegate" '
'value="http://google.com" '
'maxlength="255" />'
)
)
# test change openid settings
response = self.client.post(url_edit_account,
{'openid_server': 'http://test.com',
'openid_delegate': 'http://yahoo.com'})
p = DjangoPerson.objects.get(user__username='daveb')
self.assertEqual(p.openid_server, 'http://test.com')
self.assertEqual(p.openid_delegate, 'http://yahoo.com')
def test_edit_account_form_error(self):
'''
check AccountForm error messages
'''
p = DjangoPerson.objects.get(user__username='daveb')
self.assertEqual(p.openid_server, '')
self.assertEqual(p.openid_delegate, '')
url_edit_account = reverse('edit_account', args=['daveb'])
response = self.client.post(url_edit_account,
{'openid_server': 'example',
'openid_delegate': 'fooBar'})
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'openid_server',
'Enter a valid URL.')
self.assertFormError(response, 'form', 'openid_delegate',
'Enter a valid URL.')
p = DjangoPerson.objects.get(user__username='daveb')
self.assertEqual(p.openid_server, '')
self.assertEqual(p.openid_delegate, '')
def test_change_portfolio_entry(self):
url_profile = reverse('user_profile', args=['daveb'])
url_edit_portfolio = reverse('edit_portfolio', args=['daveb'])
response = self.client.get(url_profile)
self.assertContains(response, '<li><a href="http://example.org/" '
'class="url" rel="nofollow"><cite>'
'cheese-shop</cite></a></li>')
# test change existing portfolio entry
response = self.client.post(url_edit_portfolio,
{'title_1': 'chocolate shop',
'url_1': 'cs.org'}, follow=True)
self.assertRedirects(response, url_profile)
self.assertNotContains(response, '<li><a href="http://example.org/" '
'class="url" rel="nofollow"><cite>'
'cheese-shop</cite></a></li>')
self.assertContains(response, '<li><a href="http://cs.org" class="url'
'" rel="nofollow"><cite>chocolate shop'
'</cite></a></li>')
def test_remove_portfolio_entry(self):
# test remove existing portfolio entry
url_profile = reverse('user_profile', args=['daveb'])
url_edit_portfolio = reverse('edit_portfolio', args=['daveb'])
response = self.client.post(url_edit_portfolio,
{'title_1': '', 'url_1': ''}, follow=True)
self.assertRedirects(response, url_profile)
self.assertNotContains(response, '<li><a href="http://example.org/" '
'class="url" rel="nofollow"><cite>'
'cheese-shop</cite></a></li>')
self.assertNotContains(response, '<li><a href="cs.org/" class="url" '
'rel="nofollow"><cite>chocolate shop'
'</cite></a></li>')
self.assertContains(response, 'Add some sites')
def test_add_portfolio_entry(self):
# test add new portfolio entry
url_profile = reverse('user_profile', args=['daveb'])
url_edit_portfolio = reverse('edit_portfolio', args=['daveb'])
response = self.client.post(url_edit_portfolio,
{'title_1': 'chocolate shop',
'url_1': 'cs.org'},
follow=True)
self.assertRedirects(response, url_profile)
self.assertNotContains(response, 'Add some sites')
self.assertContains(response, '<li><a href="http://cs.org" class="url'
'" rel="nofollow"><cite>chocolate shop'
'</cite></a></li>')
def test_portfolio_form_url_error(self):
# test portfolio edit form
url_edit_portfolio = reverse('edit_portfolio', args=['daveb'])
response = self.client.get(url_edit_portfolio)
self.assertHTMLEqual(
response.content.decode('utf8').split(
'<label for="id_title_1">Title 1:</label>'
)[1].split('</div>')[0],
(
'<input id="id_title_1" type="text" '
'name="title_1" value="cheese-shop" '
'maxlength="100" />'
)
)
self.assertHTMLEqual(
response.content.decode('utf8').split(
'<label for="id_url_1">URL 1:</label>'
)[1].split('</div>')[0],
(
'<input id="id_url_1" type="url" '
'name="url_1" value="http://example.org/'
'" maxlength="255" />'
)
)
self.assertHTMLEqual(
response.content.decode('utf8').split(
'<label for="id_title_2">Title 2:</label>'
)[1].split('</div>')[0],
(
'<input id="id_title_2" type="text" '
'name="title_2" maxlength="100" />'
)
)
self.assertHTMLEqual(
response.content.decode('utf8').split(
'<label for="id_url_2">URL 2:</label>'
)[1].split('</div>')[0],
(
'<input id="id_url_2" type="url" '
'name="url_2" maxlength="255" />'
)
)
# test form error messages
response = self.client.post(url_edit_portfolio,
{'title_1': 'chocolate shop',
'url_1': 'no url'},
follow=True)
self.assertFormError(response, 'form', 'url_1', 'Enter a valid URL.')
def test_edit_other_user(self):
# test editing another user's portfolio
# add new user
user = User.objects.create_user('testuser', 'foo@example.com', 'pass')
DjangoPerson.objects.create(
user=user,
country=Country.objects.get(pk=1),
latitude=44,
longitude=2,
location_description='Somewhere',
)
url_profile = reverse('user_profile', args=['testuser'])
url_edit_portfolio = reverse('edit_portfolio', args=['testuser'])
# no Add some sites link for user daveb on testuser's profile page
response = self.client.get(url_profile)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'Add some sites')
# daveb can't add sites to testuser's portfolio
response = self.client.post(url_edit_portfolio,
{'title_1': 'chocolate shop',
'url_1': 'cs.org'}, follow=True)
self.assertEqual(response.status_code, 403)
response = self.client.get(url_profile)
self.assertNotContains(response, '<li><a href="http://cs.org/" class="'
'url" rel="nofollow"><cite>chocolate '
'shop </cite></a></li>')
def test_edit_password_permission(self):
'''
logged in user can only edit his own password
'''
url_edit_password = reverse('edit_password', args=['daveb'])
# user can edit his own password
response = self.client.get(url_edit_password)
self.assertEqual(response.status_code, 200)
response = self.client.post(url_edit_password)
self.assertEqual(response.status_code, 200)
# user can't edit passwords of other users
url_edit_password = reverse('edit_password', args=['satchmo'])
response = self.client.get(url_edit_password)
self.assertEqual(response.status_code, 403)
response = self.client.post(url_edit_password)
self.assertEqual(response.status_code, 403)
def test_edit_password(self):
'''
test editing passwords
'''
url_edit_password = reverse('edit_password', args=['daveb'])
url_profile = reverse('user_profile', args=['daveb'])
response = self.client.get(url_edit_password)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'edit_password.html')
u = User.objects.get(username='daveb')
self.assertTrue(u.check_password('123456'))
response = self.client.post(url_edit_password,
{'current_password': '123456',
'password1': 'foo',
'password2': 'foo'})
self.assertRedirects(response, url_profile)
u = User.objects.get(username='daveb')
self.assertTrue(u.check_password('foo'))
def test_edit_password_form_current_password_error(self):
'''
test form error messages when current password is invalid
'''
url_edit_password = reverse('edit_password', args=['daveb'])
response = self.client.post(url_edit_password,
{'current_password': 'invalid pw',
'password1': 'foo1',
'password2': 'foo'})
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'current_password',
'Please submit your current password.')
def test_edit_password_form_error_fields_required(self):
'''
test form error messages when form fields are empty
'''
url_edit_password = reverse('edit_password', args=['daveb'])
response = self.client.post(url_edit_password, {'password1': 'foo1'})
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'password2',
'This field is required.')
response = self.client.post(url_edit_password, {'password2': 'foo1'})
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'password1',
'This field is required.')
response = self.client.post(url_edit_password, {})
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'password1',
'This field is required.')
self.assertFormError(response, 'form', 'password2',
'This field is required.')
def test_edit_password_form_error_different_passwords(self):
'''
test form error message when user submits two different
passwords
'''
url_edit_password = reverse('edit_password', args=['daveb'])
u = User.objects.get(username='daveb')
self.assertTrue(u.check_password('123456'))
# two passwords aren't the same
response = self.client.post(url_edit_password, {'password1': 'foo1',
'password2': 'foo'})
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', None,
'The passwords did not match.')
u = User.objects.get(username='daveb')
self.assertTrue(u.check_password('123456'))
def test_edit_bio_permission(self):
'''
logged in user can only edit his own bio
'''
url = reverse('edit_bio', args=['daveb'])
# user can edit his own bio
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url)
self.assertEqual(response.status_code, 302)
# user can't edit bios of other users
url = reverse('edit_bio', args=['satchmo'])
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
response = self.client.post(url)
self.assertEqual(response.status_code, 403)
def test_edit_bio(self):
'''
test changing the bio
'''
url_edit_bio = reverse('edit_bio', args=['daveb'])
url_profile = reverse('user_profile', args=['daveb'])
response = self.client.get(url_edit_bio)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'edit_bio.html')
p = DjangoPerson.objects.get(user__username='daveb')
self.assertEqual(p.bio, 'ad')
bio_string = 'I do a lot of Django stuff'
response = self.client.post(url_edit_bio,
{'bio': bio_string}, follow=True)
self.assertRedirects(response, url_profile)
self.assertContains(response, bio_string)
self.assertContains(response, 'edit bio')
p = DjangoPerson.objects.get(user__username='daveb')
self.assertEqual(p.bio, bio_string)
def test_delete_bio(self):
url_edit_bio = reverse('edit_bio', args=['daveb'])
url_profile = reverse('user_profile', args=['daveb'])
response = self.client.post(url_edit_bio,
{'bio': ''}, follow=True)
self.assertRedirects(response, url_profile)
self.assertContains(response, 'Create your bio')
p = DjangoPerson.objects.get(user__username='daveb')
self.assertEqual(p.bio, '')
def test_edit_location_permission(self):
'''
logged in user can only edit his own location
'''
url = reverse('edit_location', args=['daveb'])
# user can edit his own location
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
# user can't edit locations of other users
url = reverse('edit_location', args=['satchmo'])
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
response = self.client.post(url)
self.assertEqual(response.status_code, 403)
def test_edit_location(self):
'''
test changing the location
'''
longitude = 14.9853515625
latitude = 50.0359736721955
location_description = 'Vienna, Austria'
country = 12 # id of Austria
url_edit_location = reverse('edit_location', args=['daveb'])
url_profile = reverse('user_profile', args=['daveb'])
response = self.client.get(url_profile)
self.assertContains(response, 'Austria')
self.assertContains(response, 'data-shrinklat="%d' % latitude)
self.assertContains(response, 'data-shrinklon="%d' % longitude)
p = DjangoPerson.objects.get(user__username='daveb')
self.assertTrue(abs(p.latitude - latitude) < 0.01)
self.assertTrue(abs(p.longitude - longitude) < 0.01)
self.assertEqual(p.location_description, location_description)
self.assertEqual(p.country.pk, country)
response = self.client.get(url_edit_location)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'edit_location.html')
new_longitude = 153.023071289
new_latitude = -27.5411533739
new_location_description = 'Brisbane'
new_country = 'AU' # iso code of Australia
location_dict = {'longitude': new_longitude,
'latitude': new_latitude,
'location_description': new_location_description,
'country': new_country,
'region': 'AL'}
response = self.client.post(url_edit_location, location_dict)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'region',
('The region you selected does not match the '
'country'))
del location_dict['region']
response = self.client.post(url_edit_location, data=location_dict,
follow=True)
self.assertRedirects(response, url_profile)
self.assertNotContains(response, 'Austria')
self.assertNotContains(response, 'data-shrinklat="%d' % latitude)
self.assertNotContains(response, 'data-shrinklon="%d' % longitude)
self.assertContains(response, 'Australia')
self.assertContains(response, 'data-shrinklat="%d' % new_latitude)
self.assertContains(response, 'data-shrinklon="%d' % new_longitude)
p = DjangoPerson.objects.get(user__username='daveb')
self.assertTrue(abs(p.latitude - new_latitude) < 0.01)
self.assertTrue(abs(p.longitude - new_longitude) < 0.01)
self.assertEqual(p.location_description, new_location_description)
self.assertEqual(p.country.iso_code, new_country)
def test_update_us_location(self):
url = reverse('edit_location', args=['daveb'])
data = {
'location_description': 'Rapid City, South Dakota',
'country': 'US',
'latitude': '44.07883004975277',
'longitude': '-103.28332901005193',
'region': 'SD',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
def test_edit_location_form_error_fields_required(self):
url_edit_location = reverse('edit_location', args=['daveb'])
new_longitude = 153.023071289
new_latitude = -27.5411533739
new_location_description = 'Brisbane'
new_country = 'AU' # iso code of Australia
location_dict = {'longitude': new_longitude,
'latitude': new_latitude,
'location_description': new_location_description,
'country': new_country}
response = self.client.post(url_edit_location, data=location_dict)
self.assertEqual(response.status_code, 302)
# remove longitude
location_dict.pop('longitude')
response = self.client.post(url_edit_location, data=location_dict)
self.assertFormError(response, 'form', 'longitude',
'This field is required.')
# remove latitude
location_dict.pop('latitude')
response = self.client.post(url_edit_location, data=location_dict)
self.assertFormError(response, 'form', 'longitude',
'This field is required.')
self.assertFormError(response, 'form', 'latitude',
'This field is required.')
# remove location_description
location_dict.pop('location_description')
response = self.client.post(url_edit_location, data=location_dict)
self.assertFormError(response, 'form', 'longitude',
'This field is required.')
self.assertFormError(response, 'form', 'latitude',
'This field is required.')
self.assertFormError(response, 'form', 'location_description',
'This field is required.')
# remove country
location_dict.pop('country')
response = self.client.post(url_edit_location, data=location_dict)
self.assertFormError(response, 'form', 'longitude',
'This field is required.')
self.assertFormError(response, 'form', 'latitude',
'This field is required.')
self.assertFormError(response, 'form', 'location_description',
'This field is required.')
self.assertFormError(response, 'form', 'country',
'This field is required.')
def test_edit_location_form_error_invalid_iso_code(self):
url_edit_location = reverse('edit_location', args=['daveb'])
new_longitude = 153.023071289
new_latitude = -27.5411533739
new_location_description = 'Brisbane'
new_country = 'XXX' # invalid iso code
location_dict = {'longitude': new_longitude,
'latitude': new_latitude,
'location_description': new_location_description,
'country': new_country}
response = self.client.post(url_edit_location, data=location_dict)
self.assertFormError(
response, 'form', 'country',
'Select a valid choice. XXX is not one of the available choices.'
)
def test_edit_location_not_in_the_atlantic(self):
'''
test form error message when 43 < lat < 45 and -39 < lon < -33
'''
url_edit_location = reverse('edit_location', args=['daveb'])
new_longitude = -35
new_latitude = 44
new_location_description = 'Brisbane'
new_country = 13 # id of Australia
location_dict = {'longitude': new_longitude,
'latitude': new_latitude,
'location_description': new_location_description,
'country': new_country}
response = self.client.post(url_edit_location, data=location_dict)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'location_description',
('Drag and zoom the map until the crosshair '
'matches your location'))
def test_delete_account(self):
url = reverse('delete_account_request', args=['daveb'])
response = self.client.get(url)
self.assertContains(response, "Account deletion")
response = self.client.post(url, {})
url = reverse('delete_account_next', args=['daveb'])
self.assertRedirects(response, url)
self.assertEqual(len(mail.outbox), 1)
response = self.client.get(url)
self.assertContains(response, 'An email was just sent')
url = mail.outbox[0].body.split('testserver')[2].split('\n')[0]
response = self.client.get(url)
self.assertContains(response, 'Account deletion')
target = response.content.decode('utf8').split('action="')[1].split('"', 1)[0]
self.assertEqual(target, url)
data = {'password': 'example'}
response = self.client.post(url, data)
self.assertContains(response, 'Your password was invalid')
self.assertEqual(User.objects.count(), 3)
response = self.client.post(url, {'password': '123456'})
self.assertEqual(User.objects.count(), 2)
with self.assertRaises(User.DoesNotExist):
User.objects.get(username='daveb')
url = reverse('delete_account_done', args=['daveb'])
self.assertRedirects(response, url)
response = self.client.get(url)
self.assertContains(response, 'Account deleted')
def test_failing_deletion(self):
# expired link: redirect to form
url = reverse('delete_account',
args=['daveb', 'Mg:1Sd7hl:RoSbkTsuqHVUjChAwoB5HZumgCg'])
response = self.client.get(url, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertContains(response, 'Account deletion')
# invalid link: 404
url = reverse('delete_account', args=['daveb', 'test_some_data'])
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
# delete confirmation page only shown if account does not exist
url = reverse('delete_account_done',
args=[User.objects.all()[0].username])
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
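# A hedged sketch of the test scaffolding assumed above (not shown in this
# excerpt): fixtures provide three users including 'daveb' (password
# '123456') and 'satchmo', and each test runs with daveb logged in, e.g.:
#
#   def setUp(self):
#       self.client.login(username='daveb', password='123456')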
import unittest
from smart_energy_api import solaredge_api as s
class SolaredgeApiSideEffects(unittest.TestCase):
def test_solaredgemeters_meterdata(self):
d = s.solaredgemeters.meterdata()
print(d)
self.assertIsInstance(d, dict)
def test_siteenergy_energydata(self):
d = s.siteenergy.energydata()
print(d)
self.assertIsInstance(d, dict)
def test_sitepower_powerdata(self):
d = s.sitepower.powerdata()
print(d)
self.assertIsInstance(d, dict)
def test_overview_site_overview(self):
d = s.overview.site_overview()
print(d)
self.assertIsInstance(d, dict)
def test_siteenergydetails_energydetailsdata(self):
d = s.siteenergydetails.energydetailsdata()
print(d)
self.assertIsInstance(d, dict)
def test_sitepowerflow_powerflowdata(self):
d = s.sitepowerflow.powerflowdata()
print(d)
self.assertIsInstance(d, dict)
def test_sitestorage_storagedata(self):
d = s.sitestorage.storagedata()
print(d)
self.assertIsInstance(d, dict)
def test_siteenvbenefits_envdata(self):
d = s.siteenvbenefits.envdata()
print(d)
self.assertIsInstance(d, dict)
def test_siteinverter_inverterdata(self):
d = s.siteinverter.inverterdata()
print(d)
self.assertIsInstance(d, dict)
def test_sitesensors_sensordata(self):
d = s.sitesensors.sensordata()
print(d)
self.assertIsInstance(d, dict)
if __name__ == "__main__":
unittest.main()
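# Note: these tests exercise the live SolarEdge API (hence "SideEffects").
# A hedged sketch of isolating a single endpoint with unittest.mock, assuming
# the wrapper attributes are plain patchable callables:
#
#   from unittest import mock
#
#   with mock.patch.object(s.overview, 'site_overview', return_value={}):
#       assert isinstance(s.overview.site_overview(), dict)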
from django.utils.translation import ugettext_lazy as _
from fluent_pages.integration.fluent_contents.models import FluentContentsPage
from parler.models import TranslatableModel
from parler.utils.context import switch_language
from fluent_blogs.models import get_entry_model
class BlogPage(FluentContentsPage):
class Meta:
verbose_name = _("Blog module")
verbose_name_plural = _("Blog modules")
@property
def entries(self):
"""
Return the entries that are published under this node.
"""
# Since there is currently no filtering in place, return all entries.
EntryModel = get_entry_model()
qs = EntryModel.objects.order_by('-publication_date')
# Only limit to current language when this makes sense.
if issubclass(EntryModel, TranslatableModel):
admin_form_language = self.get_current_language() # page object is in current language tab.
qs = qs.active_translations(admin_form_language).language(admin_form_language)
return qs
def get_entry_queryset(self, view_url_name, for_user=None, include_hidden=False):
"""
Return the base queryset that will be shown at this blog page.
This allows subclasses of the `BlogPage` to limit which pages
are shown at a particular mount point.
"""
return get_entry_model().objects.published(for_user=for_user, include_hidden=include_hidden)
def get_entry_url(self, entry):
"""
Return the URL of a blog entry, relative to this page.
"""
# It could be possible this page is fetched as fallback, while the 'entry' does have a translation.
# - Currently django-fluent-pages 1.0b3 `Page.objects.get_for_path()` assigns the language of retrieval
# as current object language. The page is not assigned a fallback language instead.
# - With i18n_patterns() that would make strange URLs, such as '/en/blog/2016/05/dutch-entry-title/'
# Hence, respect the entry language as starting point to make the language consistent.
with switch_language(self, entry.get_current_language()):
return self.get_absolute_url() + entry.get_relative_url()
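# A minimal usage sketch (hedged; assumes a published BlogPage exists and has
# at least one entry beneath it):
#
#   page = BlogPage.objects.published().first()
#   for entry in page.entries[:5]:
#       print(page.get_entry_url(entry))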
from math import prod
from typing import List
from digits import champernowne_digit
def p40(positions: List[int]) -> int:
return prod(champernowne_digit(n) for n in positions)
if __name__ == '__main__':
print(p40([1, 10, 100, 1000, 10000, 100000, 1000000]))
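# `digits.champernowne_digit` is not shown here; the sketch below is a hedged
# stand-in illustrating the usual constant-space approach: skip whole blocks
# of 1-digit, 2-digit, ... numbers until the n-th digit (1-indexed) of
# Champernowne's constant 0.123456789101112... falls inside one number.
def _champernowne_digit_sketch(n: int) -> int:
    length, count, start = 1, 9, 1
    while n > length * count:  # skip the block of `count` numbers with `length` digits
        n -= length * count
        length += 1
        count *= 10
        start *= 10
    number = start + (n - 1) // length         # the number containing digit n
    return int(str(number)[(n - 1) % length])  # the digit within that number
# Sanity check: p40 evaluated with these digits yields the known answer 210.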
#!/usr/bin/env python
import logging
from typing import Union
from expiringdict import ExpiringDict
from .cognito import CognitoUserPassAuth, CognitoBase, CognitoTokenAuth
from .entities import User, JWTToken, JWTPublicKeyRing
from . import __appname__
__author__ = "Giuseppe Chiesa"
__copyright__ = "Copyright 2017, Giuseppe Chiesa"
__credits__ = ["Giuseppe Chiesa"]
__license__ = "BSD"
__maintainer__ = "Giuseppe Chiesa"
__email__ = "mail@giuseppechiesa.it"
__status__ = "PerpetualBeta"
class Authenticator(object):
def __init__(self, cache_obj: ExpiringDict, client_id: str = '', user_salt: str = '') -> None:
self.logger = logging.getLogger(f'{__appname__}.{self.__class__.__name__}')
self._data = cache_obj
self._client_id = client_id
self._user_salt = user_salt
def _get_from_cache(self, username: str) -> Union[None, User]:
if not self._data:
return None
return self._data.get(username, None)
def _cognito_auth(self, username: str, password: str) -> Union[None, User]:
cauth = CognitoUserPassAuth(client_id=self._client_id)
return cauth.authenticate(username, password, self._user_salt)
def auth_basic(self, username: str, password: str) -> Union[None, User]:
cached_user = self._get_from_cache(username)
if cached_user:
if cached_user == User(username, password, self._user_salt):
return cached_user
return self._cognito_auth(username, password)
def refresh_token(self, token: str) -> Union[None, User]:
cauth = CognitoBase(self._client_id)
return cauth.refresh_token(token)
def auth_token(self, token: JWTToken, pubkey_ring: JWTPublicKeyRing) -> Union[None, User]:
cauth = CognitoTokenAuth(self._client_id, pubkey_ring)
user = cauth.authenticate(token)
if not user:
return None
cached_user = self._get_from_cache(user.username)
return cached_user or user
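# A minimal usage sketch (hedged; the client id, salt, and credentials below
# are placeholders, not values from this project):
#
#   cache = ExpiringDict(max_len=100, max_age_seconds=300)
#   auth = Authenticator(cache, client_id='example-client-id', user_salt='salt')
#   user = auth.auth_basic('alice', 's3cret')  # returns a User, or None on failure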
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 12 16:11:08 2019
Analyze performance of multi sensor localization algorithms
@author: anantgupta
"""
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing as mp
import pickle
# from IPython import get_ipython
from functools import partial
import os as os
from tqdm import tqdm
import matplotlib.animation as animation
import time
# Custom libs
import GAutils.objects as ob
import GAutils.config as cfg # Sim parameters
import GAutils.proc_est as pr
import GAutils.simulate_snapshot2 as sim2
import GAutils.perf_eval as prfe
import GAutils.PCRLB as pcrlb
import importlib
importlib.reload(cfg)
def set_params(name, value):
setattr(cfg, name, value)
def main():
#if 1: # For spyder
Nsensa = cfg.Nsensa
# Naming algorithm names & Plotting
alg_name = ['Estimation', 'Graph Init.','Association','Refinement','All_edges','Brute',cfg.mode+'-Edges',cfg.mode+'-LLR']
Nf = cfg.Nf
Noba=cfg.Noba
snra=cfg.snra
static_snapshot = cfg.static_snapshot
runtime = np.zeros([8,cfg.Ninst])
ospa_error1 = np.zeros([cfg.Ninst,cfg.Nf,5])
PVerror = np.zeros((cfg.Ninst, max(Noba),2))
rd_error = np.zeros([cfg.Ninst,cfg.Nf,2])
rd_err1 = np.zeros((cfg.Ninst, max(Nsensa), max(Noba),2))
rd_err2 = np.zeros((cfg.Ninst, max(Nsensa), max(Noba),2))
crb1 = np.zeros((cfg.Ninst, max(Nsensa), max(Noba),2))
crbpv = np.zeros((cfg.Ninst, max(Noba),2))
present = np.zeros((cfg.Ninst, max(Nsensa), max(Noba)))
Nmiss1=np.zeros((cfg.Ninst, max(Nsensa)))
Nfa1 =np.zeros((cfg.Ninst, max(Nsensa)))
grca = [[] for _ in range(cfg.Ninst)]
glena = np.zeros((cfg.Ninst, 100))
Ndet = np.zeros((cfg.Ninst,cfg.Nf))
plt.close('all')
#for plt_n in range(1,6): plt.figure(plt_n), plt.clf()
#%%
# Arrange sensors in worst case to build up a scene
sensorsa = []
sx=np.linspace(-max(cfg.swidtha), max(cfg.swidtha), max(cfg.Nsensa))
for x in sx:
sensorsa.append(ob.Sensor(x,0))
np.random.seed(28)
seeda = np.random.randint(1000, size=Nf)
# print('Seeds used:',seeda)
# TODO NOTE: Min threshold might not be satisfied for all sensors!!
scenea = [pr.init_random_scene(max(Noba), sensorsa, cfg.sep_th, seeda[f]) for f in range(Nf)]
t=time.time()
# Step 1: Init multiprocessing.Pool()
if cfg.N_cpu <1:
N_cpu = mp.cpu_count()
else:
N_cpu = cfg.N_cpu
pool = mp.Pool(N_cpu)
print('Using CPU count = ',str(N_cpu))
# snap = partial(sim2.run_snapshot, )
for inst in tqdm(range(cfg.Ninst), desc='Instances'):
Nob = Noba[inst]
Nsens = Nsensa[inst]
swidth = cfg.swidtha[inst]
# Generate sensor each time
sx=np.linspace(-swidth/2, swidth/2,Nsens)
sensors = [ob.Sensor(x,0) for x in sx]
cfgp = {'Nsel': [],# Genie info on # targets
'rd_wt':cfg.rd_wt,
'static_snapshot': cfg.static_snapshot,
'sep_th':cfg.sep_th,
'pmiss':cfg.pmissa[inst],
'estalgo':cfg.estalgo,
'osps':cfg.osps,
'n_Rc':cfg.n_Rc,
'n_pfa':cfg.n_pfa,
# Association
'rob':cfg.roba[inst],
'mode': cfg.mode,
'hscale':cfg.hscale,
'incr':cfg.incr,
'hN': cfg.hN,
'ag_pfa':cfg.ag_pfa,
'al_pfa':cfg.al_pfa,
'Tlen':cfg.Tlen,
# Gauss Newton
'gn_steps':cfg.gn_steps,
'fu_alg':cfg.fu_alg
}
# print('Running {} of {} '.format(inst+1, cfg.Ninst))
if cfg.parallel:
# snapshot_results = []
argarray = [(scenea[f][0:Nob], sensors, snra[inst], cfgp, seeda[f]) for f in range(Nf)]
snapshot_results = pool.starmap(sim2.run_snapshot, argarray)
for f in tqdm(range(Nf),desc='Averaging', leave=False): # Loop over frames
if cfg.parallel:
snapshot_result = snapshot_results[f]
else:
snapshot_result = sim2.run_snapshot(scenea[f][0:Nob], sensors, snra[inst], cfgp, seeda[f])
Ndet[inst, f] = len(snapshot_result['loc']) # Count target associated
runtime[:,inst] += snapshot_result['runtime']
ospa_error1[inst,f,:] += snapshot_result['OSPAerror1'] # track based
glen = snapshot_result['glen']
glena[inst,:len(glen)] += np.array(glen)
ret, det, Nmisst, Nfat, crbt, presentt = snapshot_result['RDpack']#prfe.compute_rde_targetwise(garda_sel, gardat, sensors)
rd_error[inst,f,:] += np.sum(snapshot_result['RDerror'],axis =1) # Already multiplied by the number of targets detected
grca[inst].append( snapshot_result['loc'] )
rd_err1[inst,:Nsens,:Nob,0] += np.array(ret)
rd_err1[inst,:Nsens,:Nob,1] += np.array(det)
rd_err2[inst,:Nsens,:Nob,0] += np.array(ret)**2
rd_err2[inst,:Nsens,:Nob,1] += np.array(det)**2
present[inst,:Nsens,:Nob] +=presentt
crb1[inst,:Nsens,:Nob] += snapshot_result['crbrd']/Nf #crbt
Nmiss1[inst,:Nsens] += Nmisst
Nfa1[inst,:Nsens] += Nfat
crbpv[inst,:Nob] += snapshot_result['crbpv']/Nf
PVerror[inst,:Nob] += snapshot_result['PVerror']/Nf
# for i in range(3,5):
# print(grca[inst][0][i-3].x)
# print(ospa_error1[inst,f,i])
#Average or update scene
if not static_snapshot: scene = snapshot_result['next_scene'] # Update scene for next timestep
# Step 3: Don't forget to close
pool.close()
print('Processing took {} s.'.format(time.time()-t))
#%% Mask the arrays for averaging
mask1 = np.ones((cfg.Ninst, max(Nsensa), max(Noba),2))
for i in range(cfg.Ninst):
mask1[i,:Nsensa[i],:Noba[i],:]=0
rd_err1 = np.ma.array(rd_err1, mask=mask1)
rd_err2 = np.ma.array(rd_err2, mask=mask1)
crb1 = np.ma.array(crb1, mask=mask1)
present = np.ma.array(present, mask=mask1[:,:,:,0])
Nmiss1=np.ma.array(Nmiss1, mask=mask1[:,:,0,0])
Nfa1 =np.ma.array(Nfa1, mask=mask1[:,:,0,0])
crbpv = np.ma.array(crbpv, mask=mask1[:,0,:,:])
PVerror = np.ma.array(PVerror, mask=mask1[:,0,:,:])
#%% Interference CRB
#%% Final Plotting
# plt.switch_backend('Qt4Agg')
rng_used = cfg.rng_used
units=['(m)','(m/s)']
plt.figure(1)
plt.subplot(1,2,1)
plt.bar(range(4), np.mean(runtime[:4], axis=1), tick_label=alg_name[:4]),plt.grid(True)
plt.subplot(1,2,2)
pltn={}
for i in range(4):
pltn[i]= plt.plot(rng_used, runtime[i,:], label = alg_name[i]),plt.grid(True)
plt.legend()
fig = plt.gcf()
fig.set_size_inches(8.8,4.8)
plt.tight_layout()
# Track comparisons
plt.figure(11)
plt.subplot(1,2,1)
plt.bar(range(3), np.mean(runtime[4:7], axis=1), tick_label=alg_name[4:7]),plt.grid(True)
plt.ylabel('Number of Tracks visited'),plt.title('Association Complexity')
plt.subplot(1,2,2)
pltn={}
for i in range(4,8):
pltn[i]= plt.plot(rng_used, runtime[i,:], label = alg_name[i]),plt.grid(True)
plt.legend(),plt.xlabel(cfg.xlbl),plt.ylabel('Number of Tracks visited'),plt.title('Association Complexity')
plt.yscale('log')
fig = plt.gcf()
fig.set_size_inches(8.8,4.8)
plt.tight_layout()
# Analyze track quality
# plt.figure(2)
# plt.plot(St_er)
# plt.xlabel(cfg.xlbl),plt.ylabel('RMS Error'),plt.title('Error Nearest Phantom(Solid), Auto KF(Dashed)')
# plt.plot(Auto_er, linestyle='--'),plt.legend(['x','y','v_x','x','y','v_x'])
# Analyze
capt2 = ['Position error','Velocity error']
plt.figure(2)
for i in range(3,5):
plt.subplot(1,2,i-2)
# plt.errorbar(rng_used, np.mean(ospa_error1[:,:,i], axis=1), np.std(ospa_error1[:,:,i], axis=1), color='r')
# plt.errorbar(rng_used, np.mean(np.sqrt(crbpv[:,:,i-3]), axis=(1)), np.std(np.sqrt(crbpv[:,:,i-3]), axis=(1)), color='k')
# plt.plot(rng_used, 10*np.log10(np.mean(np.sqrt(PVerror[:,:,i-3]),axis=1)#/np.mean(Ndet,axis=1) #Original
if True:
# Find where are non zero PVerrors
PVTemp = PVerror[:,:,i-3]
CRBTemp = crbpv[:,:,i-3]
plt.plot(rng_used, 10*np.log10([np.mean(np.sqrt(PVi[PVi>0])) for PVi in PVTemp]
), color='r', label='RMSE')
plt.plot(rng_used, 10*np.log10([np.mean(np.sqrt(CRBT[PVi>0])) for (PVi,CRBT) in zip(PVTemp,CRBTemp)]
), 'k--', label='CRB'),plt.yscale('linear')
else:
plt.plot(rng_used, 10*np.log10(np.mean(np.sqrt(PVerror[:,:,i-3]),axis=1)
), color='r', label='RMSE')
plt.plot(rng_used, 10*np.log10(np.mean(np.sqrt(crbpv[:,:,i-3]),axis=1)
), 'k--', label='CRB'),plt.yscale('linear')
# plt.subplot(2,2,i)
# for j in range(crbpv.shape[1]):
# plt.plot(rng_used, np.sqrt(PVerror[:,j,i-3]), color='r')
# plt.plot(rng_used, (np.sqrt(crbpv[:,j,i-3])), color='k'),plt.yscale('log')
plt.xlabel(cfg.xlbl),plt.ylabel('RMS Error (dB)'+units[i-3]),plt.title(capt2[i-3]),plt.grid(True)
fig = plt.gcf()
fig.set_size_inches(8,4.8)
plt.tight_layout()
capt3 = ['Overall','Localization error','Cardinality error']
plt.figure(3)
for i in range(3):
plt.subplot(1,3,i+1)
plt.errorbar(rng_used, np.mean(ospa_error1[:,:,i], axis=1), np.std(ospa_error1[:,:,i], axis=1), color='r')
plt.xlabel(cfg.xlbl),plt.title(capt3[i]),plt.grid(True)
if i<=1:
plt.yscale('log'), plt.ylabel('RMS Error (?)')
else:
plt.ylabel('Error in Num targets')
fig = plt.gcf()
fig.set_size_inches(9.6,4.8)
plt.tight_layout()
capt4 = ['Range Error','Doppler Error']
plt.figure(4)
for i in range(2):
plt.subplot(1,2,i+1)
# plt.plot(rng_used, 10*np.log10(np.sum(np.sqrt(rd_err2[:,:,:,i]), axis =(1,2))/np.sum(present,axis=(1,2))), 'r-', label='RMSE')
plt.plot(rng_used, 10*np.log10(np.sqrt(np.sum(rd_err2[:,:,:,i], axis =(1,2))/np.sum(present,axis=(1,2)))), 'r-', label='RMSE')
plt.plot(rng_used, 10*np.log10(np.sqrt(np.mean(crb1[:,:,:,i], axis=(1,2)))), 'k--', label='CRB')
# plt.plot(rng_used, 10*np.log10(np.mean(np.sqrt(crb1[:,:,:,i]), axis=(1,2))), 'k--', label='CRB')
plt.xlabel(cfg.xlbl),plt.ylabel('RMS Error (dB)'+units[i]),plt.title(capt4[i]),plt.grid(True),plt.yscale('linear')
plt.tight_layout()
fig = plt.gcf()
fig.set_size_inches(8,4.8)
capt4 = ['Range Error, ','Doppler Error, ']
if cfg.sensor_wise:
plt.figure(5)
for i in range(2):
for j in range(Nsens):
plt.subplot(2,Nsens, i*Nsens+j+1)
plt.errorbar(rng_used, np.mean(rd_err1[:,j,:,i]/present[:,j,:],axis=1),
np.sqrt(np.mean(rd_err2[:,j,:,i]/present[:,j,:]-(rd_err1[:,j,:,i]/present[:,j,:])**2, axis =1)),label='S{}'.format(j+1))
if i==1: plt.xlabel(cfg.xlbl)
if j==0: plt.ylabel('RMS Error '+units[i])
plt.title(capt4[i]),plt.legend(),plt.grid(True)
fig = plt.gcf()
fig.set_size_inches(12.8,7.2)
plt.tight_layout()
plt.figure(6)
ax1, ax2 = plt.subplot(2,2,1), plt.subplot(2,2,2)
for j in range(Nsens):
ax1.plot(rng_used, np.mean(present[:,j,:],axis=1)/Nf, label='S{}'.format(j+1))
ax1.set_title('Expected P(Detection), Miss, False Alarm'),ax1.set_xlabel(cfg.xlbl),ax1.grid(True),ax1.legend()
for j in range(Nsens):
tr_p = np.mean(present[:,j,:],axis=1)/Nf
fa_p = Nfa1[:,j]/Nf
fa_n = Nmiss1[:,j]/Nf
precision_m = tr_p/(fa_p+tr_p)
recall_m = tr_p/(tr_p+fa_n)
ax2.scatter(recall_m, precision_m)
ax2.set_title('Precision vs Recall'),ax2.set_ylabel('Precision'),ax2.set_xlabel('Recall'),ax2.grid(True)
plt.subplot(2,2,3)
for j in range(Nsens):
plt.plot(rng_used, Nmiss1[:,j]/Nf, label='S{}'.format(j+1))
plt.title('Missed targets'),plt.legend(),plt.grid(True),plt.xlabel(cfg.xlbl),plt.ylabel(r'$E\left[(N_{est}-N_{true})_-\right]$')
plt.subplot(2,2,4)
for j in range(Nsens):
plt.plot(rng_used, Nfa1[:,j]/Nf, label='S{}'.format(j+1))
plt.title('False Targets'),plt.legend(),plt.grid(True),plt.xlabel(cfg.xlbl),plt.ylabel(r'$E\left[(N_{est}-N_{true})_+\right]$')
resizefig(plt, 8,6)
plt.figure(8)
for i in range(2):
for j in range(Nsens):
plt.subplot(2,Nsens,Nsens*i+j+1)
for k in range(Nob):
plt.plot(rng_used, np.sqrt((rd_err2[:,j,k,i]/present[:,j,k]-(rd_err1[:,j,k,i]/present[:,j,k])**2)))
plt.gca().set_prop_cycle(None)# Reset coloring
for k in range(Nob):
plt.plot(rng_used, np.sqrt(crb1[:,j,k,i]/present[:,j,k]), '--')
if i==1: plt.xlabel(cfg.xlbl)
if j==0: plt.ylabel('RMS Error '+units[i])
plt.title(capt4[i]+'Sensor '+str(j+1)),plt.grid(True),plt.yscale('log')
resizefig(plt, 12.8,7.2)
else:
plt.figure(5)
for i in range(2):
plt.subplot(1,2, i+1)
plt.errorbar(rng_used, np.mean(rd_err1[:,:,:,i]/present,axis=(1,2)),
np.sqrt(np.mean(rd_err2[:,:,:,i]/present-(rd_err1[:,:,:,i]/present)**2, axis =(1,2))))
plt.xlabel(cfg.xlbl),plt.ylabel('RMS Error'),plt.title(capt4[i]),plt.grid(True)
plt.figure(6)
plt.errorbar(rng_used, np.mean(present[:,:,:]/Nf, axis=(1,2)), np.std(present/Nf, axis=(1,2)),label='P_D')
plt.errorbar(rng_used,np.mean( Nmiss1/Nf, axis=1),np.std( Nmiss1/Nf, axis=1), label= 'Miss')
plt.errorbar(rng_used,np.mean( Nfa1/Nf, axis=1),np.std( Nfa1/Nf, axis=1),label = 'False Alarm')
plt.title('Expected P(Detection), Miss, False Alarm'),plt.legend(),plt.grid(True),plt.xlabel(cfg.xlbl)
plt.figure(8)
for i in range(2):
plt.subplot(1,2,i+1)
plt.errorbar(rng_used, np.sqrt(np.mean(rd_err2[:,:,:,i]/present-(rd_err1[:,:,:,i]/present)**2, axis=(1,2))),
np.sqrt(np.std(rd_err2[:,:,:,i]/present-(rd_err1[:,:,:,i]/present)**2, axis=(1,2))))
plt.errorbar(rng_used, np.sqrt(np.mean(crb1[:,:,:,i]/present,axis=(1,2))),
np.sqrt(np.std(crb1[:,:,:,i]/present,axis=(1,2))), fmt= '--')
plt.gca().set_prop_cycle(None)# Reset coloring
plt.xlabel('Sensor'),plt.ylabel('RMS Error'),plt.title(capt4[i]),plt.grid(True),plt.yscale('log')
# plt.figure(7)
fig, axs = plt.subplots(2, 2, num=7)# systemwide
tr_p = np.array([ospa_error1[j,:,3]/Nob for j,Nob in enumerate(Noba)])
fa_p = np.array([(ospa_error1[j,:,2]+Nob-ospa_error1[j,:,3])/Nob for j,Nob in enumerate(Noba)])
fa_n = np.array([(Nob-ospa_error1[j,:,3])/Nob for j,Nob in enumerate(Noba)])
precision_m = tr_p/(fa_p+tr_p)
recall_m = tr_p/(tr_p+fa_n)
axs[0,0].errorbar(rng_used, np.mean(tr_p,axis=1),np.std(tr_p,axis=1), label='P_D')
axs[0,0].errorbar(rng_used, np.mean(fa_p,axis=1),np.std(fa_p,axis=1), label = 'False Alarm')
axs[0,0].errorbar(rng_used, np.mean(fa_n,axis=1),np.std(fa_n,axis=1), label = 'Miss')
axs[0,0].set_title('Expected P(Detection), Miss, False Alarm'),axs[0,0].set_ylabel(r'$P_D$')
axs[0,0].set_xlabel(cfg.xlbl),axs[0,0].grid(True),axs[0,0].legend()
axs[0,1].scatter(recall_m, precision_m)
axs[0,1].set_title('Precision vs Recall'),axs[0,1].set_ylabel('Precision'),axs[0,1].set_xlabel('Recall'),axs[0,1].grid(True)
axs[1,0].hist([Nob + ospa_error1[j,:,2] for j,Nob in enumerate(Noba)])
axs[1,0].set_title('Histogram of detections (system-level)')
resizefig(plt, 8,6)
# Add plot for combined measure (P(estimate in ball|detect))
plt.figure(9)
for j in range(Nsens):
plt.subplot(2,Nsens,j+1)
prfe.plotg(rd_err1[:,j,:,0].flatten(), np.sqrt(np.sum(crb1[:,j,:,0],
axis=(0,1))/sum(Noba*Nsens)),plt,True),plt.title(r'$\Delta R$ Sensor {}'.format(j+1))
plt.subplot(2,Nsens,Nsens+j+1)
prfe.plotg(rd_err1[:,j,:,1].flatten(), np.sqrt(np.sum(crb1[:,j,:,1],
axis=(0,1))/sum(Noba*Nsens)),plt,True),plt.title(r'$\Delta D$ Sensor {}'.format(j+1))
fig = plt.gcf()
fig.set_size_inches(12.8,7.2)
plt.tight_layout()
plt.figure(10)
plt.subplot(1,2,1)
for i in range(cfg.Ninst):
hN_max = np.count_nonzero(glena[i,:])
plt.plot(range(hN_max+2), (glena[i,:hN_max+2]/Nf), label = str(rng_used[i]))
plt.legend(),plt.grid(True),plt.title('Graph nodes v/s relax iterations'),plt.ylabel('Num vertices'),plt.xlabel('Iterations')
plt.subplot(1,2,2)
plt.errorbar(rng_used, np.mean(Ndet, axis=1), np.std(Ndet, axis =1), label = 'Estimated')
plt.plot(rng_used, cfg.Noba, 'k:', label = 'True')
plt.legend(),plt.grid(True),plt.title('Model order estimation'),plt.ylabel('Num targets detected'),plt.xlabel(cfg.xlbl)
resizefig(plt, 8,4.8)
# Save files
try:
# Create target Directory
os.makedirs(cfg.folder)
print("Directory " , cfg.folder , " Created ")
except FileExistsError:
print("Directory " , cfg.folder , " already exists")
# Setup video files
if cfg.movie:
try:
FFMpegWriter = animation.writers['ffmpeg']
metadata = dict(title='Movie Test', artist='Anant',comment='Target motion')
writer = FFMpegWriter(fps=1, metadata=metadata)
fig = plt.figure(15)
with writer.saving(fig, '{}/Scenes.mp4'.format(cfg.folder), dpi=100):
for i, scene in enumerate(scenea):
for j in range(cfg.Ninst):
sx=np.linspace(-cfg.swidtha[j], cfg.swidtha[j],cfg.Nsensa[j])
sensorsp = [ob.Sensor(x,0) for x in sx]
phlist = grca[j][i]
plt.clf()
for gr in phlist:
if abs(gr.vx)+abs(gr.vy)>0:
plt.quiver(gr.x, gr.y,gr.vx,gr.vy, color='r', headwidth = 4, headlength=6, headaxislength=5)
else:
plt.plot(gr.x, gr.y, 'ro')
pr.plot_scene(plt, scene[:Noba[j]], sensorsp, 15, 'Scene {} with {} detections, SNR = {} dB'.format(i, np.round(np.sum(present[j,:,:],axis=1)/Nf/Noba[j],2), round(snra[j])))
writer.grab_frame()
except Exception as e: print(e)
# Save variables
# np.savetxt('{}/mat.out'.format(cfg.folder), (Noba, snra), delimiter=",")
handle = open('{}/params.txt'.format(cfg.folder),'w')
handle.write('Robust Level={}\n'.format(cfg.roba))
handle.write('Sep_th={}\n'.format(cfg.sep_th))
handle.write('SNR={}\n'.format(np.round(snra,2)))
handle.write('Nsens={}\n'.format(cfg.Nsensa))
handle.write('Noba={}\n'.format(np.round(Noba,2)))
handle.write('Sensor Width={}\n'.format(cfg.swidtha))
mcss=sensors[0].mcs
handle.write('Sensor BW={}Hz,R_res={}m, D_res={}m/s \n'.format(mcss.B, 3e8/2/mcss.B, 3e8/2/mcss.fc/mcss.tf))
handle.write('Monte Carlo Iterations={}\n'.format(cfg.Nf))
handle.write('mode={}\n'.format(cfg.mode))
handle.write('Tlen={}\n'.format(cfg.Tlen))
handle.write('Pmiss={}\n'.format(cfg.pmissa))
handle.write('Est_Algo={}\n'.format(cfg.estalgo))
handle.write('NOMP: OSPS={}, n_pfa={}, n_Rc={}\n'.format(cfg.osps,cfg.n_pfa,cfg.n_Rc))
handle.write('GA-DFS: ag_pfa={}, al_pfa={}\n'.format(cfg.ag_pfa, cfg.al_pfa))
handle.write('Relax: hN={}, hscale={}, incr ={}\n'.format(cfg.hN, cfg.hscale, cfg.incr))
handle.write('Misc: rd_wt={}, fu_alg={}, gn_steps={}'.format(cfg.rd_wt, cfg.fu_alg, cfg.gn_steps))
for fignum in range(1,12):
plt.figure(fignum)
plt.savefig("{}/{}".format(cfg.folder,fignum), transparent=True)
if fignum not in [5,8,9]:
pickle.dump(plt.figure(fignum), open("{}/plot{}.pickle".format(cfg.folder,fignum), "wb"))
plt.close('all')
print('Processing+Plotting took {} s.'.format(time.time()-t))
def resizefig(plt, x, y):
fig = plt.gcf()
fig.set_size_inches(x,y)
plt.tight_layout()
if __name__ == "__main__":
__spec__ = None
# ipython = get_ipython()
# ipython.magic('%load_ext autoreload')
# ipython.magic('%autoreload 2')
# ipython.magic('%matplotlib')
main()
a = "hello"
print(a[1])
# indexing is zero-based, so a[1] is the second character, "e"
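# indices can also be negative and count from the end
print(a[-1])
# prints "o", the last character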
# Generated by Django 2.2.17 on 2021-04-15 15:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('autoemails', '0015_auto_20210405_1920'),
('consents', '0003_term_help_text'),
]
operations = [
migrations.AddField(
model_name='term',
name='rq_jobs',
field=models.ManyToManyField(blank=True, help_text='This should be filled out by AMY itself.', to='autoemails.RQJob', verbose_name='Related Redis Queue jobs'),
),
]
# coding: utf-8
import numpy as np
try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle  # Python 3 fallback
import utils
import h5py
import os
def convert_files(file_paths, vocabulary, punctuations, output_path):
inputs = []
outputs = []
punctuation = " "
for file_path in file_paths:
with open(file_path, 'r') as corpus:
for line in corpus:
array = np.zeros(shape=(1, len(vocabulary)), dtype=np.int8)
array[0,utils.input_word_index(vocabulary, "<START>")] = 1
inputs.append(array)
array = np.zeros(shape=(1, len(punctuations)), dtype=np.int8)
array[0,utils.punctuation_index(punctuations, " ")] = 1
outputs.append(array)
for token in line.split():
if token in punctuations:
punctuation = token
continue
else:
array = np.zeros(shape=(1, len(vocabulary)), dtype=np.int8)
array[0,utils.input_word_index(vocabulary, token)] = 1
inputs.append(array)
array = np.zeros(shape=(1, len(punctuations)), dtype=np.int8)
array[0,utils.punctuation_index(punctuations, punctuation)] = 1
outputs.append(array)
punctuation = " "
array = np.zeros(shape=(1, len(vocabulary)), dtype=np.int8)
array[0,utils.input_word_index(vocabulary, "<END>")] = 1
inputs.append(array)
array = np.zeros(shape=(1, len(punctuations)), dtype=np.int8)
array[0,utils.punctuation_index(punctuations, punctuation)] = 1
outputs.append(array)
assert len(inputs) == len(outputs)
inputs = np.array(inputs, dtype=np.int8).reshape((len(inputs), 1, len(vocabulary)))
outputs = np.array(outputs, dtype=np.int16).reshape((len(inputs), len(punctuations)))
f = h5py.File(output_path + '.h5', "w")
dset = f.create_dataset('inputs', data=inputs, dtype='i8')
dset = f.create_dataset('outputs',data=outputs, dtype='i8')
f.close()
data = {"vocabulary": vocabulary, "punctuations": punctuations,
"total_size": len(inputs)}
with open(output_path + '.pkl', 'wb') as output_file:
pickle.dump(data, output_file, protocol=pickle.HIGHEST_PROTOCOL)
PHASE1_TRAIN_PATH = "../data/train1"
PHASE1_DEV_PATH = "../data/dev1"
PUNCTUATIONS = {" ": 0, ".PERIOD": 1, ",COMMA": 2}
VOCABULARY_FILE = "../raw_data/vocab"
TRAIN_DATA = "../raw_data/train.txt"
DEV_DATA = "../raw_data/dev.txt"
if not os.path.exists("../data"):
os.makedirs("../data")
print("Converting data...")
vocabulary = utils.load_vocabulary(VOCABULARY_FILE)
convert_files([TRAIN_DATA], vocabulary, PUNCTUATIONS, PHASE1_TRAIN_PATH)
convert_files([DEV_DATA], vocabulary, PUNCTUATIONS, PHASE1_DEV_PATH)
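# A hedged sketch of the `utils` helpers this script relies on (illustrative
# stand-ins only; the real module is not shown here):
#
#   def load_vocabulary(path):
#       with open(path) as f:
#           return {word.strip(): i for i, word in enumerate(f)}
#
#   def input_word_index(vocabulary, word):
#       return vocabulary.get(word, vocabulary.get("<UNK>", 0))
#
#   def punctuation_index(punctuations, token):
#       return punctuations[token]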
from __future__ import division
from ev3.lego import ColorSensor
from time import time, sleep
tick = 0.05
color = ColorSensor()
def median(lst):
lst = sorted(lst)
if len(lst) < 1:
return None
if len(lst) %2 == 1:
return lst[((len(lst)+1)//2)-1]
if len(lst) %2 == 0:
return float(sum(lst[(len(lst)//2)-1:(len(lst)//2)+1]))/2.0
def unzip3(data):
d1 = []
d2 = []
d3 = []
for v1, v2, v3 in data:
d1.append(v1)
d2.append(v2)
d3.append(v3)
return (d1, d2, d3)
def calibration():
print("Give me black and press enter!")
black = []
#raw_input()
for i in range(1,20):
black.append(color.rgb)
sleep(tick)
print("Black acquired")
sleep(3)
print("Give me white and press enter!")
white = []
#raw_input()
for i in range(1,20):
white.append(color.rgb)
sleep(tick)
print("White acquired")
white_components = [median(l) for l in unzip3(white)]
black_components = [median(l) for l in unzip3(black)]
red_correction = (255 / (white_components[0] - black_components[0]), (-255 * black_components[0]) / (white_components[0] - black_components[0]))
green_correction = (255 / (white_components[1] - black_components[1]), (-255 * black_components[1]) / (white_components[1] - black_components[1]))
blue_correction = (255 / (white_components[2] - black_components[2]), (-255 * black_components[2]) / (white_components[2] - black_components[2]))
adjustments = [red_correction, green_correction, blue_correction]
print(adjustments)
return adjustments
def acquire_adjusted(adjustments):
value = color.rgb
pairs = zip(value, adjustments)
corrected = []
for col, (a, b) in pairs:
corrected.append((col * a) + b)
return (corrected[0], corrected[1], corrected[2])
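# A worked example of the correction (illustrative numbers, not measurements):
# if the median black red component is 20 and the median white one is 220,
#   a = 255 / (220 - 20) = 1.275
#   b = -255 * 20 / (220 - 20) = -25.5
# so a raw reading of 20 maps to 20*1.275 - 25.5 = 0 and 220 maps to 255,
# stretching each channel onto the full 0-255 range.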
def main():
adjustments = calibration()
print(adjustments)
while True:
print("Gimme color")
rgb = acquire_adjusted(adjustments)  # avoid shadowing the global `color` sensor
print(rgb)
if raw_input() == "stop":
break
if __name__ == '__main__':
main()
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# This module implements the basic functions needed
# to interface to rrdtool
#
# Author:
# Igor Sfiligoi
#
import shlex
import string
import subprocess
import time
from . import subprocessSupport
try:
import rrdtool # pylint: disable=import-error
except:
pass
class BaseRRDSupport:
#############################################################
def __init__(self, rrd_obj):
self.rrd_obj = rrd_obj
def isDummy(self):
return self.rrd_obj is None
#############################################################
# The default will do nothing
# Children should overwrite it, if needed
def get_disk_lock(self, fname):
return dummy_disk_lock()
#############################################################
# The default will do nothing
# Children should overwrite it, if needed
def get_graph_lock(self, fname):
return dummy_disk_lock()
#############################################################
def create_rrd(self, rrdfname, rrd_step, rrd_archives, rrd_ds):
"""
Create a new RRD archive
Arguments:
rrdfname - File path name of the RRD archive
rrd_step - base interval in seconds
rrd_archives - list of tuples, each containing the following fields (in order)
CF - consolidation function (usually AVERAGE)
xff - xfiles factor (fraction that can be unknown)
steps - how many of these primary data points are used to build a consolidated data point
rows - how many generations of data values are kept
rrd_ds - a tuple containing the following fields (in order)
ds-name - attribute name
DST - Data Source Type (usually GAUGE)
heartbeat - the maximum number of seconds that may pass between two updates before it becomes unknown
min - min value
max - max value
For more details see
http://oss.oetiker.ch/rrdtool/doc/rrdcreate.en.html
"""
self.create_rrd_multi(rrdfname, rrd_step, rrd_archives, (rrd_ds,))
return
#############################################################
def create_rrd_multi(self, rrdfname, rrd_step, rrd_archives, rrd_ds_arr):
"""
Create a new RRD archive
Arguments:
rrdfname - File path name of the RRD archive
rrd_step - base interval in seconds
rrd_archives - list of tuples, each containing the following fields (in order)
CF - consolidation function (usually AVERAGE)
xff - xfiles factor (fraction that can be unknown)
steps - how many of these primary data points are used to build a consolidated data point
rows - how many generations of data values are kept
rrd_ds_arr - list of tuples, each containing the following fields (in order)
ds-name - attribute name
DST - Data Source Type (usually GAUGE)
heartbeat - the maximum number of seconds that may pass between two updates before it becomes unknown
min - min value
max - max value
For more details see
http://oss.oetiker.ch/rrdtool/doc/rrdcreate.en.html
"""
if self.rrd_obj is None:
return # nothing to do in this case
# make the start time aligned on the rrd_step boundary
# This is needed for optimal resolution selection
start_time = (int(time.time() - 1) // rrd_step) * rrd_step
# print (rrdfname,start_time,rrd_step)+rrd_ds
args = [str(rrdfname), "-b", "%li" % start_time, "-s", "%i" % rrd_step]
for rrd_ds in rrd_ds_arr:
args.append("DS:%s:%s:%i:%s:%s" % rrd_ds)
for archive in rrd_archives:
args.append("RRA:%s:%g:%i:%i" % archive)
lck = self.get_disk_lock(rrdfname)
try:
self.rrd_obj.create(*args)
finally:
lck.close()
return
#############################################################
def update_rrd(self, rrdfname, time, val):
"""
Update an RRD archive with a new value
Arguments:
rrdfname - File path name of the RRD archive
time - When was the value taken
val - What was the value
"""
if self.rrd_obj is None:
# nothing to do in this case
return
lck = self.get_disk_lock(rrdfname)
try:
self.rrd_obj.update(str(rrdfname), "%li:%s" % (time, val))
finally:
lck.close()
return
#############################################################
def update_rrd_multi(self, rrdfname, time, val_dict):
"""
Update an RRD archive with a set of values (possibly all of the supported)
Arguments:
rrdfname - File path name of the RRD archive
time - When was the value taken
val_dict - What was the value
"""
if self.rrd_obj is None:
return # nothing to do in this case
args = [str(rrdfname)]
ds_names = sorted(val_dict.keys())
ds_names_real = []
ds_vals = []
for ds_name in ds_names:
if val_dict[ds_name] is not None:
ds_vals.append("%s" % val_dict[ds_name])
ds_names_real.append(ds_name)
if len(ds_names_real) == 0:
return
args.append("-t")
args.append(":".join(ds_names_real))
args.append(("%li:" % time) + ":".join(ds_vals))
lck = self.get_disk_lock(rrdfname)
try:
# print args
self.rrd_obj.update(*args)
finally:
lck.close()
return
#############################################################
def rrd2graph(
self,
fname,
rrd_step,
ds_name,
ds_type,
start,
end,
width,
height,
title,
rrd_files,
cdef_arr=None,
trend=None,
img_format="PNG",
):
"""
Create a graph file out of a set of RRD files
Arguments:
fname - File path name of the graph file
rrd_step - Which step should I use in the RRD files
ds_name - Which attribute should I use in the RRD files
ds_type - Which type should I use in the RRD files
start,end - Time points in utime format
width,height - Size of the graph
title - Title to put in the graph
rrd_files - list of RRD files, each being a tuple of (in order)
rrd_id - logical name of the RRD file (will be the graph label)
rrd_fname - name of the RRD file
graph_type - Graph type (LINE, STACK, AREA)
graph_color - Graph color in rrdtool format
cdef_arr - list of derived RRD values
if present, only the cdefs will be plotted
each element is a tuple of (in order)
rrd_id - logical name of the RRD file (will be the graph label)
cdef_formula - Derived formula in rrdtool format
graph_type - Graph type (LINE, STACK, AREA)
graph_color - Graph color in rrdtool format
trend - Trend value in seconds (if desired, None else)
For more details see
http://oss.oetiker.ch/rrdtool/doc/rrdgraph.en.html
"""
if self.rrd_obj is None:
return # nothing to do in this case
multi_rrd_files = []
for rrd_file in rrd_files:
multi_rrd_files.append((rrd_file[0], rrd_file[1], ds_name, ds_type, rrd_file[2], rrd_file[3]))
return self.rrd2graph_multi(
fname, rrd_step, start, end, width, height, title, multi_rrd_files, cdef_arr, trend, img_format
)
#############################################################
def rrd2graph_now(
self,
fname,
rrd_step,
ds_name,
ds_type,
period,
width,
height,
title,
rrd_files,
cdef_arr=None,
trend=None,
img_format="PNG",
):
"""
Create a graph file out of a set of RRD files
Arguments:
fname - File path name of the graph file
rrd_step - Which step should I use in the RRD files
ds_name - Which attribute should I use in the RRD files
ds_type - Which type should I use in the RRD files
period - start=now-period, end=now
width,height - Size of the graph
title - Title to put in the graph
rrd_files - list of RRD files, each being a tuple of (in order)
rrd_id - logical name of the RRD file (will be the graph label)
rrd_fname - name of the RRD file
graph_type - Graph type (LINE, STACK, AREA)
graph_color - Graph color in rrdtool format
cdef_arr - list of derived RRD values
if present, only the cdefs will be plotted
each element is a tuple of (in order)
rrd_id - logical name of the RRD file (will be the graph label)
cdef_formula - Derived formula in rrdtool format
graph_type - Graph type (LINE, STACK, AREA)
graph_color - Graph color in rrdtool format
trend - Trend value in seconds (if desired, None else)
For more details see
http://oss.oetiker.ch/rrdtool/doc/rrdgraph.en.html
"""
now = int(time.time())
start = ((now - period) // rrd_step) * rrd_step
end = ((now - 1) // rrd_step) * rrd_step
return self.rrd2graph(
fname, rrd_step, ds_name, ds_type, start, end, width, height, title, rrd_files, cdef_arr, trend, img_format
)
#############################################################
def rrd2graph_multi(
self, fname, rrd_step, start, end, width, height, title, rrd_files, cdef_arr=None, trend=None, img_format="PNG"
):
"""
Create a graph file out of a set of RRD files
Arguments:
fname - File path name of the graph file
rrd_step - Which step should I use in the RRD files
start,end - Time points in utime format
width,height - Size of the graph
title - Title to put in the graph
rrd_files - list of RRD files, each being a tuple of (in order)
rrd_id - logical name of the RRD file (will be the graph label)
rrd_fname - name of the RRD file
ds_name - Which attribute should I use in the RRD files
ds_type - Which type should I use in the RRD files
graph_type - Graph type (LINE, STACK, AREA)
graph_color - Graph color in rrdtool format
cdef_arr - list of derived RRD values
if present, only the cdefs will be plotted
each element is a tuple of (in order)
rrd_id - logical name of the RRD file (will be the graph label)
cdef_formula - Derived formula in rrdtool format
graph_type - Graph type (LINE, STACK, AREA)
graph_color - Graph color in rrdtool format
trend - Trend value in seconds (if desired, None else)
img_format - format of the graph file (default PNG)
For more details see
http://oss.oetiker.ch/rrdtool/doc/rrdgraph.en.html
"""
if self.rrd_obj is None:
return # nothing to do in this case
args = [
str(fname),
"-s",
"%li" % start,
"-e",
"%li" % end,
"--step",
"%i" % rrd_step,
"-l",
"0",
"-w",
"%i" % width,
"-h",
"%i" % height,
"--imgformat",
str(img_format),
"--title",
str(title),
]
for rrd_file in rrd_files:
ds_id = rrd_file[0]
ds_fname = rrd_file[1]
ds_name = rrd_file[2]
ds_type = rrd_file[3]
if trend is None:
args.append(str(f"DEF:{ds_id}={ds_fname}:{ds_name}:{ds_type}"))
else:
args.append(str(f"DEF:{ds_id}_inst={ds_fname}:{ds_name}:{ds_type}"))
args.append(str("CDEF:%s=%s_inst,%i,TREND" % (ds_id, ds_id, trend)))
plot_arr = rrd_files
if cdef_arr is not None:
# plot the cdefs not the files themselves, when we have them
plot_arr = cdef_arr
for cdef_el in cdef_arr:
ds_id = cdef_el[0]
cdef_formula = cdef_el[1]
ds_graph_type = cdef_el[2]
ds_color = cdef_el[3]
args.append(str(f"CDEF:{ds_id}={cdef_formula}"))
else:
plot_arr = []
for rrd_file in rrd_files:
plot_arr.append((rrd_file[0], None, rrd_file[4], rrd_file[5]))
if plot_arr[0][2] == "STACK":
# add an invisible baseline to stack upon
args.append("AREA:0")
for plot_el in plot_arr:
ds_id = plot_el[0]
ds_graph_type = plot_el[2]
ds_color = plot_el[3]
args.append(f"{ds_graph_type}:{ds_id}#{ds_color}:{ds_id}")
args.append("COMMENT:Created on %s" % time.strftime(r"%b %d %H\:%M\:%S %Z %Y"))
try:
lck = self.get_graph_lock(fname)
try:
self.rrd_obj.graph(*args)
finally:
lck.close()
except:
print("Failed graph: %s" % str(args))
return args
#############################################################
def rrd2graph_multi_now(
self, fname, rrd_step, period, width, height, title, rrd_files, cdef_arr=None, trend=None, img_format="PNG"
):
"""
Create a graph file out of a set of RRD files
Arguments:
fname - File path name of the graph file
rrd_step - Which step should I use in the RRD files
period - start=now-period, end=now
width,height - Size of the graph
title - Title to put in the graph
rrd_files - list of RRD files, each being a tuple of (in order)
rrd_id - logical name of the RRD file (will be the graph label)
rrd_fname - name of the RRD file
ds_name - Which attribute should I use in the RRD files
ds_type - Which type should I use in the RRD files
graph_type - Graph type (LINE, STACK, AREA)
graph_color - Graph color in rrdtool format
cdef_arr - list of derived RRD values
if present, only the cdefs will be plotted
each element is a tuple of (in order)
rrd_id - logical name of the RRD file (will be the graph label)
cdef_formula - Derived formula in rrdtool format
graph_type - Graph type (LINE, STACK, AREA)
graph_color - Graph color in rrdtool format
trend - Trend value in seconds (if desired, None else)
img_format - format of the graph file (default PNG)
For more details see
http://oss.oetiker.ch/rrdtool/doc/rrdgraph.en.html
"""
now = int(time.time())
start = ((now - period) // rrd_step) * rrd_step
end = ((now - 1) // rrd_step) * rrd_step
return self.rrd2graph_multi(
fname, rrd_step, start, end, width, height, title, rrd_files, cdef_arr, trend, img_format
)
###################################################
def fetch_rrd(self, filename, CF, resolution=None, start=None, end=None, daemon=None):
"""
Fetch will analyze the RRD and try to retrieve the data in the
resolution requested.
Arguments:
filename -the name of the RRD you want to fetch data from
CF -the consolidation function that is applied to the data
you want to fetch (AVERAGE, MIN, MAX, LAST)
resolution -the interval you want your values to have
(default 300 sec)
start -start of the time series (default end - 1day)
end -end of the time series (default now)
daemon -Address of the rrdcached daemon. If specified, a flush
command is sent to the server before reading the RRD
files. This allows rrdtool to return fresh data even
if the daemon is configured to cache values for a long
time.
For more details see
http://oss.oetiker.ch/rrdtool/doc/rrdfetch.en.html
"""
if self.rrd_obj is None:
return # nothing to do in this case
if CF in ("AVERAGE", "MIN", "MAX", "LAST"):
consolFunc = str(CF)
else:
raise RuntimeError("Invalid consolidation function %s" % CF)
args = [str(filename), consolFunc]
if resolution is not None:
args.append("-r")
args.append(str(resolution))
if end is not None:
args.append("-e")
args.append(str(end))
if start is not None:
args.append("-s")
args.append(str(start))
if daemon is not None:
args.append("--daemon")
args.append(str(daemon))
return self.rrd_obj.fetch(*args)
def verify_rrd(self, filename, expected_dict):
"""
Verifies that an rrd matches a dictionary of datastores.
This will return a tuple of arrays ([missing],[extra]) attributes
@param filename: filename of the rrd to verify
@param expected_dict: dictionary of expected values
@return: A two-tuple of arrays ([missing attrs],[extra attrs])
"""
rrd_info = self.rrd_obj.info(filename)
rrd_dict = {}
for key in list(rrd_info.keys()):
# rrdtool 1.3
if key[:3] == "ds[":
rrd_dict[key[3:].split("]")[0]] = None
# rrdtool 1.2
if key == "ds":
for dskey in list(rrd_info[key].keys()):
rrd_dict[dskey] = None
missing = []
extra = []
for t in list(expected_dict.keys()):
if t not in list(rrd_dict.keys()):
missing.append(t)
for t in list(rrd_dict.keys()):
if t not in list(expected_dict.keys()):
extra.append(t)
return (missing, extra)
# This class uses the rrdtool module for rrd_obj
class ModuleRRDSupport(BaseRRDSupport):
def __init__(self):
BaseRRDSupport.__init__(self, rrdtool)
# This class uses rrdtool cmdline for rrd_obj
class ExeRRDSupport(BaseRRDSupport):
def __init__(self):
BaseRRDSupport.__init__(self, rrdtool_exe())
# This class tries to use the rrdtool module for rrd_obj
# then tries the rrdtool cmdline
# will use None if needed
class rrdSupport(BaseRRDSupport):
def __init__(self):
try:
rrd_obj = rrdtool
except NameError:
try:
rrd_obj = rrdtool_exe()
            except Exception:
                rrd_obj = None
BaseRRDSupport.__init__(self, rrd_obj)
##################################################################
# INTERNAL, do not use directly
##################################################################
##################################
# Dummy, do nothing
# Used just to get an object
class DummyDiskLock:
def close(self):
return
def dummy_disk_lock():
return DummyDiskLock()
#################################
def string_quote_join(arglist):
l2 = []
for e in arglist:
l2.append('"%s"' % e)
return " ".join(l2)
class rrdtool_exe:
"""This class is a wrapper around the rrdtool client (binary) and
is used in place of the rrdtool python module, if that one is not available
"""
def __init__(self):
self.rrd_bin = (subprocessSupport.iexe_cmd("which rrdtool").split("\n")[0]).strip()
def create(self, *args):
cmdline = f"{self.rrd_bin} create {string_quote_join(args)}"
outstr = subprocessSupport.iexe_cmd(cmdline)
return
def update(self, *args):
cmdline = f"{self.rrd_bin} update {string_quote_join(args)}"
outstr = subprocessSupport.iexe_cmd(cmdline)
return
def info(self, *args):
cmdline = f"{self.rrd_bin} info {string_quote_join(args)}"
outstr = subprocessSupport.iexe_cmd(cmdline).split("\n")
outarr = {}
for line in outstr:
if "=" in line:
linearr = line.split("=")
outarr[linearr[0].strip()] = linearr[1].strip()
return outarr
    def dump(self, *args):
        """Run rrdtool dump
        Input is usually just the file name.
        Output is a list of lines, as returned from rrdtool.
        Args:
            *args: rrdtool dump arguments, joined in single string for the command line
        Returns:
            list: the lines output by rrdtool dump
        """
        cmdline = f"{self.rrd_bin} dump {string_quote_join(args)}"
        outstr = subprocessSupport.iexe_cmd(cmdline).split("\n")
        return outstr
def restore(self, *args):
cmdline = f"{self.rrd_bin} restore {string_quote_join(args)}"
outstr = subprocessSupport.iexe_cmd(cmdline)
return
def graph(self, *args):
cmdline = f"{self.rrd_bin} graph {string_quote_join(args)}"
outstr = subprocessSupport.iexe_cmd(cmdline)
return
def fetch(self, *args):
cmdline = f"{self.rrd_bin} fetch {string_quote_join(args)}"
outstr = subprocessSupport.iexe_cmd(cmdline).split("\n")
headers = tuple(outstr.pop(0).split())
lines = []
for line in outstr:
if len(line) == 0:
continue
lines.append(tuple(float(i) if i != "-nan" else None for i in line.split()[1:]))
tstep = int(outstr[2].split(":")[0]) - int(outstr[1].split(":")[0])
ftime = int(outstr[1].split(":")[0]) - tstep
ltime = int(outstr[-2].split(":")[0])
times = (ftime, ltime, tstep)
outtup = (times, headers, lines)
return outtup
def addDataStore(filenamein, filenameout, attrlist):
    """Add a list of data stores to an RRD export file
    This will essentially add attributes to the end of an RRD row
    @param filenamein: filename path of an RRD exported with rrdtool dump
    @param filenameout: filename path of the output xml with datastores added
    @param attrlist: array of datastores to add
    """
    parse = False
    writtenDS = False
    with open(filenamein) as f, open(filenameout, "w") as out:
        for line in f:
            if ("<rra>" in line) and (not writtenDS):
                for a in attrlist:
                    out.write("<ds>\n")
                    out.write("<name> %s </name>\n" % a)
                    out.write("<type> GAUGE </type>\n")
                    out.write("<minimal_heartbeat> 1800 </minimal_heartbeat>\n")
                    out.write("<min> NaN </min>\n")
                    out.write("<max> NaN </max>\n")
                    out.write("<!-- PDP Status -->\n")
                    out.write("<last_ds> UNKN </last_ds>\n")
                    out.write("<value> 0 </value>\n")
                    out.write("<unknown_sec> 0 </unknown_sec>\n")
                    out.write("</ds>\n")
                writtenDS = True
            if "</cdp_prep>" in line:
                for a in attrlist:
                    out.write("<ds><value> NaN </value>\n")
                    out.write("<unknown_datapoints> 0 </unknown_datapoints></ds>\n")
            if "</database>" in line:
                parse = False
            if parse:
                out.write(line[:-7])
                for a in attrlist:
                    out.write("<v> NaN </v>")
                out.write(line[-7:])
            else:
                out.write(line)
            if "<database>" in line:
                parse = True
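# Illustrative sketch (hypothetical file names): extend a dumped RRD with two new
# GAUGE datastores, then restore it into a fresh file using the wrappers above.
def _example_add_datastore():
    rrd = rrdtool_exe()
    with open("metrics.xml", "w") as xml:
        xml.write("\n".join(rrd.dump("metrics.rrd")))
    addDataStore("metrics.xml", "metrics_extended.xml", ["new_attr1", "new_attr2"])
    rrd.restore("metrics_extended.xml", "metrics_extended.rrd")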
|
nilq/baby-python
|
python
|
# Write a program that outputs whether today is a weekday or a weekend.
import datetime
x = datetime.datetime.now()
y = x.weekday()
z = input('Ask me a tricky question, like "Weekday or weekend?"')
question = "Weekday or weekend?"
if z == question:
    if y <= 3:  # if today is Monday to Thursday - program answers
        print("Unfortunately today is still a weekday.")
    elif y == 4:  # If today is Friday - program answers
        print("Hold on, still weekday but nearly there, my friend")
    else:  # If today is Saturday (5) or Sunday (6) - program answers
        print("It is weekend, thanks God!")
else:  # If incorrect input - program answers
    print("I don't understand your question")
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
"""
unittest for hello solution
"""
__author__ = "Ram Basnet"
__copyright__ = "Copyright 2020"
__license__ = "MIT"
import unittest
from hello import answer
class TestHello(unittest.TestCase):
def test1_answer(self):
self.assertEqual(answer(), 'Hello World!', "Test failed...")
if __name__ == "__main__":
unittest.main(verbosity=2)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Collection of tests for :mod:`orion.core.worker.consumer`."""
import logging
import os
import signal
import subprocess
import tempfile
import time
import pytest
import orion.core.io.experiment_builder as experiment_builder
import orion.core.io.resolve_config as resolve_config
import orion.core.utils.backward as backward
import orion.core.worker.consumer as consumer
from orion.core.utils.exceptions import BranchingEvent, MissingResultFile
from orion.core.utils.format_trials import tuple_to_trial
Consumer = consumer.Consumer
@pytest.fixture
def config(exp_config):
"""Return a configuration."""
config = exp_config[0][0]
config["metadata"]["user_args"] = ["--x~uniform(-50, 50)"]
config["metadata"]["VCS"] = resolve_config.infer_versioning_metadata(
config["metadata"]["user_script"]
)
config["name"] = "exp"
config["working_dir"] = "/tmp/orion"
backward.populate_space(config)
config["space"] = config["metadata"]["priors"]
return config
@pytest.mark.usefixtures("storage")
def test_trials_interrupted_sigterm(config, monkeypatch):
"""Check if a trial is set as interrupted when a signal is raised."""
def mock_popen(self, *args, **kwargs):
os.kill(os.getpid(), signal.SIGTERM)
exp = experiment_builder.build(**config)
monkeypatch.setattr(subprocess.Popen, "wait", mock_popen)
trial = tuple_to_trial((1.0,), exp.space)
con = Consumer(exp)
with pytest.raises(KeyboardInterrupt):
con(trial)
@pytest.mark.usefixtures("storage")
def test_trial_working_dir_is_changed(config):
"""Check that trial has its working_dir attribute changed."""
exp = experiment_builder.build(**config)
trial = tuple_to_trial((1.0,), exp.space)
exp.register_trial(trial, status="reserved")
con = Consumer(exp)
con(trial)
assert trial.working_dir is not None
assert trial.working_dir == con.working_dir + "/exp_" + trial.id
def setup_code_change_mock(config, monkeypatch, ignore_code_changes):
"""Mock create experiment and trials, and infer_versioning_metadata"""
exp = experiment_builder.build(**config)
trial = tuple_to_trial((1.0,), exp.space)
exp.register_trial(trial, status="reserved")
con = Consumer(exp, ignore_code_changes=ignore_code_changes)
def code_changed(user_script):
return dict(
type="git",
is_dirty=True,
HEAD_sha="changed",
active_branch="new_branch",
diff_sha="new_diff",
)
monkeypatch.setattr(consumer, "infer_versioning_metadata", code_changed)
return con, trial
@pytest.mark.usefixtures("storage")
def test_code_changed_evc_disabled(config, monkeypatch, caplog):
"""Check that trial has its working_dir attribute changed."""
con, trial = setup_code_change_mock(config, monkeypatch, ignore_code_changes=True)
with caplog.at_level(logging.WARNING):
con(trial)
assert "Code changed between execution of 2 trials" in caplog.text
@pytest.mark.usefixtures("storage")
def test_code_changed_evc_enabled(config, monkeypatch):
"""Check that trial has its working_dir attribute changed."""
con, trial = setup_code_change_mock(config, monkeypatch, ignore_code_changes=False)
with pytest.raises(BranchingEvent) as exc:
con(trial)
assert exc.match("Code changed between execution of 2 trials")
@pytest.mark.usefixtures("storage")
def test_retrieve_result_nofile(config):
"""Test retrieve result"""
results_file = tempfile.NamedTemporaryFile(
mode="w", prefix="results_", suffix=".log", dir=".", delete=True
)
exp = experiment_builder.build(**config)
con = Consumer(exp)
    with pytest.raises(MissingResultFile) as exc:
        con.retrieve_results(results_file)
    results_file.close()
    assert exc.match(r"Cannot parse result file")
|
nilq/baby-python
|
python
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from pathlib import Path
import jsonlines
import numpy as np
import paddle
import soundfile as sf
import yaml
from yacs.config import CfgNode
from parakeet.datasets.data_table import DataTable
from parakeet.models.fastspeech2 import FastSpeech2
from parakeet.models.fastspeech2 import FastSpeech2Inference
from parakeet.models.parallel_wavegan import PWGGenerator
from parakeet.models.parallel_wavegan import PWGInference
from parakeet.modules.normalizer import ZScore
def evaluate(args, fastspeech2_config, pwg_config):
# dataloader has been too verbose
logging.getLogger("DataLoader").disabled = True
# construct dataset for evaluation
with jsonlines.open(args.test_metadata, 'r') as reader:
test_metadata = list(reader)
test_dataset = DataTable(
data=test_metadata, fields=["utt_id", "text", "spk_id"])
with open(args.phones_dict, "r") as f:
phn_id = [line.strip().split() for line in f.readlines()]
vocab_size = len(phn_id)
print("vocab_size:", vocab_size)
with open(args.speaker_dict, 'rt') as f:
spk_id = [line.strip().split() for line in f.readlines()]
num_speakers = len(spk_id)
print("num_speakers:", num_speakers)
odim = fastspeech2_config.n_mels
model = FastSpeech2(
idim=vocab_size,
odim=odim,
num_speakers=num_speakers,
**fastspeech2_config["model"])
model.set_state_dict(
paddle.load(args.fastspeech2_checkpoint)["main_params"])
model.eval()
vocoder = PWGGenerator(**pwg_config["generator_params"])
vocoder.set_state_dict(paddle.load(args.pwg_params))
vocoder.remove_weight_norm()
vocoder.eval()
print("model done!")
stat = np.load(args.fastspeech2_stat)
mu, std = stat
mu = paddle.to_tensor(mu)
std = paddle.to_tensor(std)
fastspeech2_normalizer = ZScore(mu, std)
stat = np.load(args.pwg_stat)
mu, std = stat
mu = paddle.to_tensor(mu)
std = paddle.to_tensor(std)
pwg_normalizer = ZScore(mu, std)
    fastspeech2_inference = FastSpeech2Inference(fastspeech2_normalizer, model)
pwg_inference = PWGInference(pwg_normalizer, vocoder)
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
for datum in test_dataset:
utt_id = datum["utt_id"]
text = paddle.to_tensor(datum["text"])
spk_id = paddle.to_tensor(datum["spk_id"])
with paddle.no_grad():
            wav = pwg_inference(fastspeech2_inference(text, spk_id=spk_id))
sf.write(
str(output_dir / (utt_id + ".wav")),
wav.numpy(),
samplerate=fastspeech2_config.fs)
print(f"{utt_id} done!")
def main():
    # parse args and config, then run the evaluation
parser = argparse.ArgumentParser(
description="Synthesize with fastspeech2 & parallel wavegan.")
parser.add_argument(
"--fastspeech2-config", type=str, help="fastspeech2 config file.")
parser.add_argument(
"--fastspeech2-checkpoint",
type=str,
help="fastspeech2 checkpoint to load.")
parser.add_argument(
"--fastspeech2-stat",
type=str,
help="mean and standard deviation used to normalize spectrogram when training fastspeech2."
)
parser.add_argument(
"--pwg-config", type=str, help="parallel wavegan config file.")
parser.add_argument(
"--pwg-params",
type=str,
help="parallel wavegan generator parameters to load.")
parser.add_argument(
"--pwg-stat",
type=str,
help="mean and standard deviation used to normalize spectrogram when training parallel wavegan."
)
parser.add_argument(
"--phones-dict",
type=str,
default="phone_id_map.txt",
help="phone vocabulary file.")
parser.add_argument(
"--speaker-dict",
type=str,
default="speaker_id_map.txt ",
help="speaker id map file.")
parser.add_argument("--test-metadata", type=str, help="test metadata.")
parser.add_argument("--output-dir", type=str, help="output dir.")
parser.add_argument(
"--device", type=str, default="gpu", help="device type to use.")
parser.add_argument("--verbose", type=int, default=1, help="verbose.")
args = parser.parse_args()
with open(args.fastspeech2_config) as f:
fastspeech2_config = CfgNode(yaml.safe_load(f))
with open(args.pwg_config) as f:
pwg_config = CfgNode(yaml.safe_load(f))
print("========Args========")
print(yaml.safe_dump(vars(args)))
print("========Config========")
print(fastspeech2_config)
print(pwg_config)
evaluate(args, fastspeech2_config, pwg_config)
if __name__ == "__main__":
main()
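# Example invocation (all paths are placeholders for illustration; the actual
# config/checkpoint layout depends on how the models were trained):
#   python3 this_script.py \
#       --fastspeech2-config=conf/fastspeech2.yaml \
#       --fastspeech2-checkpoint=exp/fastspeech2/snapshot.pdz \
#       --fastspeech2-stat=dump/train/speech_stats.npy \
#       --pwg-config=conf/pwg.yaml \
#       --pwg-params=exp/pwg/generator.pdparams \
#       --pwg-stat=dump/train/pwg_stats.npy \
#       --phones-dict=phone_id_map.txt \
#       --speaker-dict=speaker_id_map.txt \
#       --test-metadata=dump/test/metadata.jsonl \
#       --output-dir=exp/output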
|
nilq/baby-python
|
python
|
import time
from os import environ
import grpc
import lnd_grpc.protos.rpc_pb2 as ln
import lnd_grpc.protos.rpc_pb2_grpc as lnrpc
from lnd_grpc.base_client import BaseClient
from lnd_grpc.config import defaultNetwork, defaultRPCHost, defaultRPCPort
# tell gRPC which cipher suites to use
environ["GRPC_SSL_CIPHER_SUITES"] = (
"ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:"
"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:"
"ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384"
)
class Lightning(BaseClient):
"""
A class which interacts with the LND Lightning sub-system
"""
def __init__(
self,
lnd_dir: str = None,
macaroon_path: str = None,
tls_cert_path: str = None,
network: str = defaultNetwork,
grpc_host: str = defaultRPCHost,
grpc_port: str = defaultRPCPort,
):
self._lightning_stub: lnrpc.LightningStub = None
self.version = None
super().__init__(
lnd_dir=lnd_dir,
macaroon_path=macaroon_path,
tls_cert_path=tls_cert_path,
network=network,
grpc_host=grpc_host,
grpc_port=grpc_port,
)
@property
def version(self):
"""
:return: version of LND running
"""
if self._version:
return self._version
self._version = self.get_info().version.split(" ")[0]
return self._version
@version.setter
def version(self, version: str):
self._version = version
@staticmethod
def pack_into_channelbackups(single_backup):
"""
This function will accept either an ln.ChannelBackup object as generated by
export_chan_backup() or should be passed a single channel backup from
export_all_channel_backups().single_chan_backups[index].
It will then return a single channel backup packed into a ChannelBackups
format as required by verify_chan_backup()
"""
return ln.ChannelBackups(chan_backups=[single_backup])
@property
def lightning_stub(self) -> lnrpc.LightningStub:
"""
Create the lightning stub used to interface with the Lightning sub-system.
Connectivity to LND is monitored using a callback to the channel and if
connection status changes the stub will be dynamically regenerated on next call.
This helps to overcome issues where a sub-system is not active when the stub is
created (e.g. calling Lightning sub-system when wallet not yet unlocked) which
otherwise requires manual monitoring and regeneration
"""
# if the stub is already created and channel might recover, return current stub
if self._lightning_stub is not None and self.connection_status_change is False:
return self._lightning_stub
# otherwise, start by creating a fresh channel
self.channel = grpc.secure_channel(
target=self.grpc_address,
credentials=self.combined_credentials,
options=self.grpc_options,
)
# subscribe to channel connectivity updates with callback
self.channel.subscribe(self.connectivity_event_logger)
# create the new stub
self._lightning_stub = lnrpc.LightningStub(self.channel)
        # 'None' is connection_status's initialization state.
# ensure connection_status_change is True to keep regenerating fresh stubs until
# channel comes online
if self.connection_status is None:
self.connection_status_change = True
return self._lightning_stub
self.connection_status_change = False
return self._lightning_stub
def wallet_balance(self):
"""
Get (bitcoin) wallet balance, not in channels
:return: WalletBalanceResponse with 3 attributes: 'total_balance',
'confirmed_balance', 'unconfirmed_balance'
"""
request = ln.WalletBalanceRequest()
response = self.lightning_stub.WalletBalance(request)
return response
def channel_balance(self):
"""
Get total channel balance and pending channel balance
:return: ChannelBalanceResponse with 2 attributes: 'balance' and
'pending_open_balance'
"""
request = ln.ChannelBalanceRequest()
response = self.lightning_stub.ChannelBalance(request)
return response
def get_transactions(self):
"""
Describe all the known transactions relevant to the wallet
:returns: TransactionDetails with 1 attribute: 'transactions', containing a list
of all transactions
"""
request = ln.GetTransactionsRequest()
response = self.lightning_stub.GetTransactions(request)
return response
# TODO: add estimate_fee
# On Chain
def send_coins(self, addr: str, amount: int = None, **kwargs):
"""
Allows sending coins to a single output
If neither target_conf or sat_per_byte are set, wallet will use internal fee
model
:return: SendCoinsResponse with 1 attribute: 'txid'
"""
request = ln.SendCoinsRequest(addr=addr, amount=amount, **kwargs)
response = self.lightning_stub.SendCoins(request)
return response
def list_unspent(self, min_confs: int, max_confs: int):
"""
Lists unspent UTXOs controlled by the wallet between the chosen confirmations
:return: ListUnspentResponse with 1 attribute: 'utxo', which itself contains a
list of utxos
"""
request = ln.ListUnspentRequest(min_confs=min_confs, max_confs=max_confs)
response = self.lightning_stub.ListUnspent(request)
return response
# Response-streaming RPC
def subscribe_transactions(self):
"""
Creates a uni-directional stream from the server to the client in which any
newly discovered transactions relevant to the wallet are sent over
:return: iterable of Transactions with 8 attributes per response. See the notes
on threading and iterables in README.md
"""
request = ln.GetTransactionsRequest()
return self.lightning_stub.SubscribeTransactions(request)
def send_many(self, addr_to_amount: ln.SendManyRequest.AddrToAmountEntry, **kwargs):
"""
Send a single transaction involving multiple outputs
:return: SendManyResponse with 1 attribute: 'txid'
"""
request = ln.SendManyRequest(AddrToAmount=addr_to_amount, **kwargs)
response = self.lightning_stub.SendMany(request)
return response
def new_address(self, address_type: str):
"""
Create a new wallet address of either p2wkh or np2wkh type.
:return: NewAddressResponse with 1 attribute: 'address'
"""
if address_type == "p2wkh":
request = ln.NewAddressRequest(type="WITNESS_PUBKEY_HASH")
elif address_type == "np2wkh":
request = ln.NewAddressRequest(type="NESTED_PUBKEY_HASH")
else:
            raise TypeError(
                "invalid address type %s, supported address types are: p2wkh and np2wkh"
                % address_type
            )
response = self.lightning_stub.NewAddress(request)
return response
def sign_message(self, msg: str):
"""
Returns the signature of the message signed with this node’s private key.
The returned signature string is zbase32 encoded and pubkey recoverable, meaning
that only the message digest and signature are needed for verification.
:return: SignMessageResponse with 1 attribute: 'signature'
"""
_msg_bytes = msg.encode("utf-8")
request = ln.SignMessageRequest(msg=_msg_bytes)
response = self.lightning_stub.SignMessage(request)
return response
def verify_message(self, msg: str, signature: str):
"""
Verifies a signature over a msg. The signature must be zbase32 encoded and
signed by an active node in the resident node’s channel database. In addition to
returning the validity of the signature, VerifyMessage also returns the
recovered pubkey from the signature.
:return: VerifyMessageResponse with 2 attributes: 'valid' and 'pubkey'
"""
_msg_bytes = msg.encode("utf-8")
request = ln.VerifyMessageRequest(msg=_msg_bytes, signature=signature)
response = self.lightning_stub.VerifyMessage(request)
return response
    def connect_peer(
        self, addr: ln.LightningAddress, perm: bool = False, timeout: int = None
    ):
"""
Attempts to establish a connection to a remote peer. This is at the networking
level, and is used for communication between nodes. This is distinct from
establishing a channel with a peer.
:return: ConnectPeerResponse with no attributes
"""
request = ln.ConnectPeerRequest(addr=addr, perm=perm)
response = self.lightning_stub.ConnectPeer(request, timeout=timeout)
return response
    def connect(self, address: str, perm: bool = False, timeout: int = None):
"""
Custom function which allows passing address in a more natural
"pubkey@127.0.0.1:9735" string format into connect_peer()
:return: ConnectPeerResponse with no attributes
"""
pubkey, host = address.split("@")
_address = self.lightning_address(pubkey=pubkey, host=host)
response = self.connect_peer(addr=_address, perm=perm, timeout=timeout)
return response
def disconnect_peer(self, pub_key: str):
"""
        attempts to disconnect one peer from another identified by a given pubKey.
        In the case that we currently have a pending or active channel with the target
        peer, this action will not be allowed.
:return: DisconnectPeerResponse with no attributes
"""
request = ln.DisconnectPeerRequest(pub_key=pub_key)
response = self.lightning_stub.DisconnectPeer(request)
return response
def list_peers(self):
"""
returns a verbose listing of all currently active peers
        :return: ListPeersResponse.peers, a list of the currently active peers
"""
request = ln.ListPeersRequest()
response = self.lightning_stub.ListPeers(request)
return response.peers
def get_info(self):
"""
        returns general information concerning the lightning node including its
identity pubkey, alias, the chains it is connected to, and information
concerning the number of open+pending channels.
:return: GetInfoResponse with 14 attributes
"""
request = ln.GetInfoRequest()
response = self.lightning_stub.GetInfo(request)
return response
def pending_channels(self):
"""
returns a list of all the channels that are currently considered “pending”.
A channel is pending if it has finished the funding workflow and is waiting for
confirmations for the funding txn, or is in the process of closure, either
initiated cooperatively or non-cooperatively
:return: PendingChannelsResponse with 5 attributes: 'total_limbo_balance',
'pending_open_channels', 'pending_closing_channels',
'pending_force_closing_channels' and 'waiting_close_channels'
"""
request = ln.PendingChannelsRequest()
response = self.lightning_stub.PendingChannels(request)
return response
def list_channels(self, **kwargs):
"""
returns a description of all the open channels that this node is a participant
in.
        :return: the 'channels' field of a ListChannelsResponse, containing a list
        of the channels queried
"""
request = ln.ListChannelsRequest(**kwargs)
response = self.lightning_stub.ListChannels(request)
return response.channels
def closed_channels(self, **kwargs):
"""
returns a description of all the closed channels that this node was a
participant in.
        :return: the 'channels' field of a ClosedChannelsResponse
"""
request = ln.ClosedChannelsRequest(**kwargs)
response = self.lightning_stub.ClosedChannels(request)
return response.channels
def open_channel_sync(self, local_funding_amount: int, **kwargs):
"""
synchronous version of the OpenChannel RPC call. This call is meant to be
consumed by clients to the REST proxy. As with all other sync calls, all byte
slices are intended to be populated as hex encoded strings.
:return: ChannelPoint with 3 attributes: 'funding_txid_bytes', 'funding_tx_str'
and 'output_index'
"""
request = ln.OpenChannelRequest(
local_funding_amount=local_funding_amount, **kwargs
)
response = self.lightning_stub.OpenChannelSync(request)
return response
# Response-streaming RPC
def open_channel(self, local_funding_amount: int, timeout: int = None, **kwargs):
"""
attempts to open a singly funded channel specified in the request to a remote
peer. Users are able to specify a target number of blocks that the funding
        transaction should be confirmed in, or a manual fee rate to use for the funding
transaction. If neither are specified, then a lax block confirmation target is
used.
:return: an iterable of OpenChannelStatusUpdates. See the notes on threading and
iterables in README.md
"""
# TODO: implement `lncli openchannel --connect` function
request = ln.OpenChannelRequest(
local_funding_amount=local_funding_amount, **kwargs
)
if request.node_pubkey == b"":
request.node_pubkey = bytes.fromhex(request.node_pubkey_string)
return self.lightning_stub.OpenChannel(request, timeout=timeout)
# Response-streaming RPC
def close_channel(self, channel_point, **kwargs):
"""
attempts to close an active channel identified by its channel outpoint
(ChannelPoint). The actions of this method can additionally be augmented to
attempt a force close after a timeout period in the case of an inactive peer.
If a non-force close (cooperative closure) is requested, then the user can
specify either a target number of blocks until the closure transaction is
confirmed, or a manual fee rate. If neither are specified, then a default
lax, block confirmation target is used.
:return: an iterable of CloseChannelStatusUpdates with 2 attributes per
response. See the notes on threading and iterables in README.md
"""
funding_txid, output_index = channel_point.split(":")
_channel_point = self.channel_point_generator(
funding_txid=funding_txid, output_index=output_index
)
request = ln.CloseChannelRequest(channel_point=_channel_point, **kwargs)
return self.lightning_stub.CloseChannel(request)
    def close_all_channels(self, inactive_only: bool = False):
"""
Custom function which iterates over all channels and closes them sequentially
using close_channel()
:return: CloseChannelStatusUpdate for each channel close, with 2 attributes:
'close_pending' and 'chan_close'
"""
        if not inactive_only:
            for channel in self.list_channels():
                next(self.close_channel(channel_point=channel.channel_point))
        if inactive_only:
            for channel in self.list_channels(inactive_only=True):
                next(self.close_channel(channel_point=channel.channel_point))
def abandon_channel(self, channel_point: ln.ChannelPoint):
"""
removes all channel state from the database except for a close summary.
This method can be used to get rid of permanently unusable channels due to bugs
fixed in newer versions of lnd.
Only available when in debug builds of lnd.
:return: AbandonChannelResponse with no attributes
"""
funding_txid, output_index = channel_point.split(":")
_channel_point = self.channel_point_generator(
funding_txid=funding_txid, output_index=output_index
)
request = ln.AbandonChannelRequest(channel_point=_channel_point)
response = self.lightning_stub.AbandonChannel(request)
return response
@staticmethod
def send_request_generator(**kwargs):
"""
Creates the SendRequest object for the synchronous streaming send_payment() as a
generator
:return: generator object for the request
"""
# Commented out to complement the magic sleep below...
# while True:
request = ln.SendRequest(**kwargs)
yield request
# Magic sleep which tricks the response to the send_payment() method to actually
# contain data...
time.sleep(5)
# Bi-directional streaming RPC
def send_payment(self, **kwargs):
"""
dispatches a bi-directional streaming RPC for sending payments through the
Lightning Network. A single RPC invocation creates a persistent bi-directional
stream allowing clients to rapidly send payments through the Lightning Network
with a single persistent connection.
:return: an iterable of SendResponses with 4 attributes per response.
See the notes on threading and iterables in README.md
"""
# Use payment request as first choice
if "payment_request" in kwargs:
params = {"payment_request": kwargs["payment_request"]}
if "amt" in kwargs:
params["amt"] = kwargs["amt"]
request_iterable = self.send_request_generator(**params)
else:
            # Helper to convert hex to bytes automatically
            if "payment_hash" not in kwargs and "payment_hash_string" in kwargs:
                kwargs["payment_hash"] = bytes.fromhex(kwargs["payment_hash_string"])
            if "dest" not in kwargs and "dest_string" in kwargs:
                kwargs["dest"] = bytes.fromhex(kwargs["dest_string"])
request_iterable = self.send_request_generator(**kwargs)
return self.lightning_stub.SendPayment(request_iterable)
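    # For illustration (client is an instance of this class and the invoice string
    # is a placeholder): stream updates while paying a BOLT11 payment request.
    #
    #   for update in client.send_payment(payment_request="lnbc1..."):
    #       print(update.payment_error or update.payment_preimage)
    #       break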
# Synchronous non-streaming RPC
def send_payment_sync(self, **kwargs):
"""
synchronous non-streaming version of SendPayment. This RPC is intended to be
consumed by clients of the REST proxy. Additionally, this RPC expects the
destination’s public key and the payment hash (if any) to be encoded as hex
strings.
:return: SendResponse with up to 4 attributes: 'payment_error' (conditional),
'payment_preimage', 'payment_route' and 'payment_hash'
"""
# Use payment request as first choice
if "payment_request" in kwargs:
params = {"payment_request": kwargs["payment_request"]}
if "amt" in kwargs:
params["amt"] = kwargs["amt"]
request = ln.SendRequest(**params)
else:
request = ln.SendRequest(**kwargs)
response = self.lightning_stub.SendPaymentSync(request)
return response
def pay_invoice(self, payment_request: str):
"""
Custom function which only takes a payment request and pays the invoice using
        the synchronous send_payment_sync()
:return: SendResponse with up to 4 attributes: 'payment_error' (conditional),
'payment_preimage', 'payment_route' and 'payment_hash'
"""
response = self.send_payment_sync(payment_request=payment_request)
return response
@staticmethod
def send_to_route_generator(invoice, route):
"""
create SendToRouteRequest generator
:return: generator of SendToRouteRequest
"""
# Commented out to complement the magic sleep below...
# while True:
request = ln.SendToRouteRequest(payment_hash=invoice.r_hash, route=route)
yield request
# Magic sleep which tricks the response to the send_to_route() method to
# actually contain data...
time.sleep(5)
# Bi-directional streaming RPC
def send_to_route(self, invoice, route):
"""
bi-directional streaming RPC for sending payment through the Lightning Network.
This method differs from SendPayment in that it allows users to specify a full
route manually.
This can be used for things like rebalancing, and atomic swaps.
:return: an iterable of SendResponses with 4 attributes per response.
See the notes on threading and iterables in README.md
"""
request_iterable = self.send_to_route_generator(invoice=invoice, route=route)
return self.lightning_stub.SendToRoute(request_iterable)
# Synchronous non-streaming RPC
def send_to_route_sync(self, route, **kwargs):
"""
        a synchronous version of SendToRoute. It will block until the payment either
fails or succeeds.
:return: SendResponse with up to 4 attributes: 'payment_error' (conditional),
'payment_preimage', 'payment_route' and 'payment_hash'
"""
request = ln.SendToRouteRequest(route=route, **kwargs)
response = self.lightning_stub.SendToRouteSync(request)
return response
    def add_invoice(
        self,
        memo: str = "",
        value: int = 0,
        expiry: int = 3600,
        creation_date: int = None,
        **kwargs
    ):
        """
        attempts to add a new invoice to the invoice database. Any duplicated invoices
        are rejected, therefore all invoices must have a unique payment preimage.
        :return: AddInvoiceResponse with 3 attributes: 'r_hash', 'payment_request' and
        'add_index'
        """
        # evaluate the timestamp at call time; a default of int(time.time()) in the
        # signature would be frozen at import time
        if creation_date is None:
            creation_date = int(time.time())
        request = ln.Invoice(
            memo=memo, value=value, expiry=expiry, creation_date=creation_date, **kwargs
        )
response = self.lightning_stub.AddInvoice(request)
return response
    def list_invoices(self, reversed: bool = True, **kwargs):
"""
returns a list of all the invoices currently stored within the database.
Any active debug invoices are ignored. It has full support for paginated
responses, allowing users to query for specific invoices through their
add_index. This can be done by using either the first_index_offset or
last_index_offset fields included in the response as the index_offset of the
next request. By default, the first 100 invoices created will be returned.
Backwards pagination is also supported through the Reversed flag.
:return: ListInvoiceResponse with 3 attributes: 'invoices' containing a list of
queried invoices, 'last_index_offset' and 'first_index_offset'
"""
request = ln.ListInvoiceRequest(reversed=reversed, **kwargs)
response = self.lightning_stub.ListInvoices(request)
return response
def lookup_invoice(self, **kwargs):
"""
attempts to look up an invoice according to its payment hash.
The passed payment hash must be exactly 32 bytes, if not, an error is returned.
:return: Invoice with 21 attributes
"""
request = ln.PaymentHash(**kwargs)
response = self.lightning_stub.LookupInvoice(request)
return response
def subscribe_invoices(self, **kwargs):
"""
a uni-directional stream (server -> client) for notifying the client of newly
added/settled invoices. The caller can optionally specify the add_index and/or
the settle_index. If the add_index is specified, then we’ll first start by
sending add invoice events for all invoices with an add_index greater than the
specified value. If the settle_index is specified, the next, we’ll send out all
settle events for invoices with a settle_index greater than the specified value.
One or both of these fields can be set.
If no fields are set, then we’ll only send out the latest add/settle events.
:return: an iterable of Invoice objects with 21 attributes per response.
See the notes on threading and iterables in README.md
"""
request = ln.InvoiceSubscription(**kwargs)
return self.lightning_stub.SubscribeInvoices(request)
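    # For illustration: block and print invoices as their add/settle events arrive
    # (with no indices set, only the latest events are streamed; see the README
    # notes on threading and iterables).
    #
    #   for invoice in client.subscribe_invoices():
    #       print(invoice.memo, invoice.settled)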
def decode_pay_req(self, pay_req: str):
"""
takes an encoded payment request string and attempts to decode it, returning a
full description of the conditions encoded within the payment request.
:return: PayReq with 10 attributes
"""
request = ln.PayReqString(pay_req=pay_req)
response = self.lightning_stub.DecodePayReq(request)
return response
def list_payments(self):
"""
returns a list of all outgoing payments
:return: ListPaymentsResponse with 1 attribute: 'payments', containing a list
of payments
"""
request = ln.ListPaymentsRequest()
response = self.lightning_stub.ListPayments(request)
return response
def delete_all_payments(self):
"""
deletes all outgoing payments from DB.
:return: DeleteAllPaymentsResponse with no attributes
"""
request = ln.DeleteAllPaymentsRequest()
response = self.lightning_stub.DeleteAllPayments(request)
return response
def describe_graph(self, **kwargs):
"""
a description of the latest graph state from the point of view of the node.
The graph information is partitioned into two components: all the
nodes/vertexes, and all the edges that connect the vertexes themselves.
As this is a directed graph, the edges also contain the node directional
specific routing policy which includes: the time lock delta, fee information etc
:return: ChannelGraph object with 2 attributes: 'nodes' and 'edges'
"""
request = ln.ChannelGraphRequest(**kwargs)
response = self.lightning_stub.DescribeGraph(request)
return response
def get_chan_info(self, chan_id: int):
"""
the latest authenticated network announcement for the given channel identified
by its channel ID: an 8-byte integer which uniquely identifies the location of
        the transaction’s funding output within the blockchain.
:return: ChannelEdge object with 8 attributes
"""
request = ln.ChanInfoRequest(chan_id=chan_id)
response = self.lightning_stub.GetChanInfo(request)
return response
# Uni-directional stream
def subscribe_channel_events(self):
"""
creates a uni-directional stream from the server to the client in which any
updates relevant to the state of the channels are sent over. Events include new
active channels, inactive channels, and closed channels.
:return: an iterator of ChannelEventUpdate objects with 5 attributes per
response. See the notes on threading and iterables in README.md
"""
request = ln.ChannelEventSubscription()
return self.lightning_stub.SubscribeChannelEvents(request)
def get_node_info(self, pub_key: str):
"""
returns the latest advertised, aggregated, and authenticated channel information
for the specified node identified by its public key.
:return: NodeInfo object with 3 attributes: 'node', 'num_channels' and
'total_capacity'
"""
request = ln.NodeInfoRequest(pub_key=pub_key)
response = self.lightning_stub.GetNodeInfo(request)
return response
def query_routes(self, pub_key: str, amt: int, **kwargs):
"""
attempts to query the daemon’s Channel Router for a possible route to a target
destination capable of carrying a specific amount of satoshis.
The returned route contains the full details required to craft and send an HTLC,
also including the necessary information that should be present within the
Sphinx packet encapsulated within the HTLC.
        :return: the 'routes' field of a QueryRoutesResponse, which contains a
        single route
"""
request = ln.QueryRoutesRequest(pub_key=pub_key, amt=amt, **kwargs)
response = self.lightning_stub.QueryRoutes(request)
return response.routes
def get_network_info(self):
"""
returns some basic stats about the known channel graph from the point of view of
the node.
:return: NetworkInfo object with 10 attributes
"""
request = ln.NetworkInfoRequest()
response = self.lightning_stub.GetNetworkInfo(request)
return response
def stop_daemon(self):
"""
will send a shutdown request to the interrupt handler, triggering a graceful
shutdown of the daemon.
:return: StopResponse with no attributes
"""
request = ln.StopRequest()
response = self.lightning_stub.StopDaemon(request)
return response
# Response-streaming RPC
def subscribe_channel_graph(self):
"""
launches a streaming RPC that allows the caller to receive notifications upon
any changes to the channel graph topology from the point of view of the
responding node.
Events notified include: new nodes coming online, nodes updating their
authenticated attributes, new channels being advertised, updates in the routing
policy for a directional channel edge, and when channels are closed on-chain.
:return: iterable of GraphTopologyUpdate with 3 attributes: 'node_updates',
'channel_updates' and 'closed_chans'
"""
request = ln.GraphTopologySubscription()
return self.lightning_stub.SubscribeChannelGraph(request)
def debug_level(self, **kwargs):
"""
allows a caller to programmatically set the logging verbosity of lnd.
The logging can be targeted according to a coarse daemon-wide logging level, or
in a granular fashion to specify the logging for a target sub-system.
Usage: client.debug_level(level_spec='debug')
:return: DebugLevelResponse with 1 attribute: 'sub_systems'
"""
request = ln.DebugLevelRequest(**kwargs)
response = self.lightning_stub.DebugLevel(request)
return response
def fee_report(self):
"""
allows the caller to obtain a report detailing the current fee schedule enforced
by the node globally for each channel.
:return: FeeReportResponse with 4 attributes: 'channel_fees', 'day_fee_sum',
'week_fee_sum' and 'month_fee_sum'
"""
request = ln.FeeReportRequest()
response = self.lightning_stub.FeeReport(request)
return response
def update_channel_policy(
self,
chan_point: str,
is_global: bool = False,
base_fee_msat: int = 1000,
fee_rate: float = 0.000001,
time_lock_delta: int = 144,
):
"""
allows the caller to update the fee schedule and channel policies for all
channels globally, or a particular channel.
:return: PolicyUpdateResponse with no attributes
"""
if chan_point:
funding_txid, output_index = chan_point.split(":")
channel_point = self.channel_point_generator(
funding_txid=funding_txid, output_index=output_index
)
else:
channel_point = None
request = ln.PolicyUpdateRequest(
chan_point=channel_point,
base_fee_msat=base_fee_msat,
fee_rate=fee_rate,
time_lock_delta=time_lock_delta,
)
if is_global:
setattr(request, "global", is_global)
response = self.lightning_stub.UpdateChannelPolicy(request)
return response
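    # For illustration (the channel point is a placeholder): raise the fees on a
    # single channel while leaving the global schedule untouched.
    #
    #   client.update_channel_policy(
    #       chan_point="f0e1d2...:0",
    #       base_fee_msat=2000,
    #       fee_rate=0.000002,
    #       time_lock_delta=144,
    #   )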
def forwarding_history(self, **kwargs):
"""
allows the caller to query the htlcswitch for a record of all HTLCs forwarded
within the target time range, and integer offset within that time range.
        If no time-range is specified, then the first chunk of the past 24 hrs of
        forwarding history is returned.
A list of forwarding events are returned.
The size of each forwarding event is 40 bytes, and the max message size able to
be returned in gRPC is 4 MiB.
As a result each message can only contain 50k entries.
Each response has the index offset of the last entry.
The index offset can be provided to the request to allow the caller to skip a
series of records.
:return: ForwardingHistoryResponse with 2 attributes: 'forwarding_events' and
'last_index_offset'
"""
request = ln.ForwardingHistoryRequest(**kwargs)
response = self.lightning_stub.ForwardingHistory(request)
return response
"""
Static channel backup
"""
def export_chan_backup(self, **kwargs):
"""
attempts to return an encrypted static channel backup for the target channel
identified by its channel point.
The backup is encrypted with a key generated from the aezeed seed of the user.
The returned backup can either be restored using the RestoreChannelBackup
method once lnd is running, or via the InitWallet and UnlockWallet methods from
the WalletUnlocker service.
:return: ChannelBackup with 2 attributes: 'chan_point' and 'chan_backup'
"""
request = ln.ExportChannelBackupRequest(**kwargs)
response = self.lightning_stub.ExportChannelBackup(request)
return response
def export_all_channel_backups(self, **kwargs):
"""
returns static channel backups for all existing channels known to lnd.
A set of regular singular static channel backups for each channel are returned.
Additionally, a multi-channel backup is returned as well, which contains a
single encrypted blob containing the backups of each channel.
:return: ChanBackupSnapshot with 2 attributes: 'single_chan_backups' and
'multi_chan_backup'
"""
request = ln.ChanBackupExportRequest(**kwargs)
response = self.lightning_stub.ExportAllChannelBackups(request)
return response
def verify_chan_backup(self, **kwargs):
"""
allows a caller to verify the integrity of a channel backup snapshot.
This method will accept either a packed Single or a packed Multi.
Specifying both will result in an error.
For multi_backup: works as expected.
For single_chan_backups:
        Needs to be passed a single channel backup (ChannelBackup) packed into a
        ChannelBackups to verify successfully.
export_chan_backup() returns a ChannelBackup but it is not packed properly.
export_all_channel_backups().single_chan_backups returns a ChannelBackups but as
it contains more than one channel, verify_chan_backup() will also reject it.
Use helper method pack_into_channelbackups() to pack individual ChannelBackup
objects into the appropriate ChannelBackups objects for verification.
:return: VerifyChanBackupResponse with no attributes
"""
request = ln.ChanBackupSnapshot(**kwargs)
response = self.lightning_stub.VerifyChanBackup(request)
return response
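    # For illustration, tying the notes above together (client is an instance of
    # this class; assumes at least one open channel exists):
    #
    #   snapshot = client.export_all_channel_backups()
    #   single = snapshot.single_chan_backups.chan_backups[0]
    #   packed = Lightning.pack_into_channelbackups(single)
    #   client.verify_chan_backup(single_chan_backups=packed)
    #   # a multi backup, by contrast, verifies as-is:
    #   client.verify_chan_backup(multi_chan_backup=snapshot.multi_chan_backup)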
def restore_chan_backup(self, **kwargs):
"""
accepts a set of singular channel backups, or a single encrypted multi-chan
backup and attempts to recover any funds remaining within the channel.
If we are able to unpack the backup, then the new channel will be shown under
listchannels, as well as pending channels.
:return: RestoreBackupResponse with no attributes
"""
request = ln.RestoreChanBackupRequest(**kwargs)
response = self.lightning_stub.RestoreChannelBackups(request)
return response
# Response-streaming RPC
def subscribe_channel_backups(self, **kwargs):
"""
        allows a client to subscribe to the most up to date information concerning
        the state of all channel backups. Each time a new channel is added, we return
        the new set of channels, along with a multi-chan backup containing the backup
        info for all channels.
        Each time a channel is closed, we send a new update, which contains the new
        channel backups, as well as the updated set of encrypted multi-chan backups
        with the closed channel(s) removed.
:return: iterable of ChanBackupSnapshot responses, with 2 attributes per
response: 'single_chan_backups' and 'multi_chan_backup'
"""
request = ln.ChannelBackupSubscription(**kwargs)
response = self.lightning_stub.SubscribeChannelBackups(request)
return response
|
nilq/baby-python
|
python
|
# Generated by Django 2.1.7 on 2019-05-15 13:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("ecommerce", "0010_remove_ecommerce_course_run_enrollment"),
("courses", "0007_add_enrollment_models"),
]
operations = [
migrations.AddField(
model_name="courserunenrollment",
name="company",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.PROTECT,
to="ecommerce.Company",
),
),
migrations.AddField(
model_name="programenrollment",
name="company",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.PROTECT,
to="ecommerce.Company",
),
),
]
|
nilq/baby-python
|
python
|
from django.test import TestCase
from django.urls import reverse
from books.models import Book, Genre
class GenresListViewTest(TestCase):
def test_uses_genres_list_template(self):
response = self.client.get(reverse('books:genres-list'))
self.assertTemplateUsed(response, "books/genres_list.html")
def test_displays_existing_genre(self):
Genre.objects.create(title="Fantasy")
response = self.client.get(reverse('books:genres-list'))
self.assertContains(response, "Fantasy")
class GenreDetailsViewTest(TestCase):
def test_uses_genre_details_template(self):
genre = Genre.objects.create(title="Fantasy")
response = self.client.get(
reverse('books:genre-details', args=[genre.id]))
self.assertTemplateUsed(response, "books/genre_details.html")
def test_display_correct_genre(self):
genre = Genre.objects.create(title="Fantasy")
response = self.client.get(
reverse("books:genre-details", args=[genre.id]))
self.assertContains(response, "Fantasy")
self.assertContains(response, "Genre Fantasy")
def test_display_correct_genre_books(self):
first_genre = Genre.objects.create(title="Fantasy")
first_book = Book()
first_book.title = "Lord of the Rings"
first_book.save()
first_book.genres.add(first_genre)
response = self.client.get(
reverse('books:genre-details', args=[first_genre.id])
)
self.assertContains(response, "Fantasy")
self.assertContains(response, "Lord of the Rings")
class BooksListViewTest(TestCase):
def test_uses_books_list_template(self):
response = self.client.get(reverse('books:books-list'))
self.assertTemplateUsed(response, "books/books_list.html")
def test_displays_existing_book(self):
Book.objects.create(title="Les Miserables")
response = self.client.get(reverse('books:books-list'))
self.assertContains(response, "Les Miserables")
|
nilq/baby-python
|
python
|
# coding: utf-8
class Route:
def __init__(self, bp, prefix):
self.bp = bp
self.prefix = prefix
|
nilq/baby-python
|
python
|
import threading
from concurrent.futures.thread import ThreadPoolExecutor
from altfe.interface.root import interRoot
from app.lib.core.dl.model.dler_aria2 import Aria2Dler
from app.lib.core.dl.model.dler_dl import DlDler
from app.lib.core.dl.model.dler_dl_single import DlSingleDler
@interRoot.bind("dl", "LIB_CORE")
class core_module_dl(interRoot):
def __init__(self):
self.WAYS = {"aria2": Aria2Dler, "dl": DlDler, "dl-single": DlSingleDler}
self.modName = None
self.mod = None
self.sets = self.loadConfig(self.getENV("rootPath") + "config.yml")
self.tasks = {}
self._lock = threading.Lock()
self._pool = ThreadPoolExecutor(max_workers=self.sets["biu"]["download"]["maxDownloading"])
self.auto()
def __del__(self):
for key in self.tasks:
self.cancel(key)
self._pool.shutdown(False)
def auto(self):
mode = self.sets["biu"]["download"]["mode"] \
if self.sets["biu"]["download"]["mode"] in self.WAYS \
else "dl-single"
if mode == "aria2":
a2 = (self.sets["biu"]["download"]["aria2Host"].split(":"), self.sets["biu"]["download"]["aria2Secret"])
self.WAYS[mode].HOST = a2[0][0]
self.WAYS[mode].PORT = a2[0][1]
self.WAYS[mode].SECRET = a2[1]
self.mod = self.WAYS[mode]
self.modName = mode
return self
def add(self, key, args):
group = [self.mod(**kw) for kw in args]
self._lock.acquire()
self.tasks[key] = group
self._lock.release()
for obj in group:
self._pool.submit(obj.run)
return True
def cancel(self, key):
r = []
if key in self.tasks:
for x in self.tasks[key]:
r.append(x.cancel())
return r
def status(self, key="__all__"):
r = {}
if key == "__all__":
for x in self.tasks.copy():
r[x] = (self._status(x))
else:
if key in self.tasks:
return self._status(key)
return r
def _status(self, key):
if key not in self.tasks:
return []
r = []
group = self.tasks[key]
for obj in group:
tmp = "unknown"
if obj.status(DlDler.CODE_GOOD_SUCCESS):
tmp = "done"
elif obj.status(DlDler.CODE_GOOD):
tmp = "running"
elif obj.status(DlDler.CODE_WAIT):
tmp = "waiting"
elif obj.status(DlDler.CODE_BAD):
tmp = "failed"
r.append(tmp)
return r
def info(self, key="__all__"):
r = {}
if key == "__all__":
for x in self.tasks:
r[x] = (self._info(x))
else:
if key in self.tasks:
return self._info(key)
return r
def _info(self, key):
if key not in self.tasks:
return {}
r = {}
totalSize = 0
totalIngSize = 0
totalIngSpeed = 0
group = self.tasks[key]
tmp = [obj.info() for obj in group]
for x in tmp:
totalSize += x["size"]
totalIngSize += x["ingSize"]
totalIngSpeed += x["ingSpeed"]
r = {
"totalSize": totalSize,
"totalIngSize": totalIngSize,
"totalIngSpeed": totalIngSpeed,
"tasks": tmp
}
return r
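# Illustrative sketch (the task key and per-download kwargs are placeholders; the
# accepted kwargs depend on the configured downloader model, and the class is
# normally wired up by the altfe framework via @interRoot.bind):
#
#   dl = core_module_dl()
#   dl.add("task-1", [{"url": "https://example.com/a.bin", "path": "a.bin"}])
#   print(dl.status("task-1"))                # e.g. ["running"]
#   print(dl.info("task-1")["totalIngSpeed"])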
|
nilq/baby-python
|
python
|
from yowsup.layers.protocol_ib.protocolentities.ib import IbProtocolEntity
from yowsup.structs import ProtocolTreeNode
from yowsup.structs.protocolentity import ProtocolEntityTest
import unittest
class IbProtocolEntityTest(ProtocolEntityTest, unittest.TestCase):
def setUp(self):
self.ProtocolEntity = IbProtocolEntity
self.node = ProtocolTreeNode("ib")
|
nilq/baby-python
|
python
|
from .general import *
from .run import *
from .project import *
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 20 03:47:45 2020
@author: Maryam
"""
import numpy as np
import argparse
import pickle
import time
from scipy.sparse.linalg import svds
from utils.read_preprocss_data import read_preprocss_data
parser = argparse.ArgumentParser()
# Set Path
parser.add_argument("--DATAPATH",
default='../datasets/',
help='Filename for datasets')
parser.add_argument("--RESULTPATH",
default='../results/',
help='Filename for saving the results')
# Preprocessing
parser.add_argument('--metadata', action='store_true',
help='whether use metadata or not')
parser.add_argument('--fillnan', choices=['mean_row','mean_col'],
                    default='mean_col',
                    help='Whether to fill NaN with the mean of rows or of columns')
# Similarity
parser.add_argument('--sim_method', choices=['sigmoid_kernel','cosine_similarity'],
default='cosine_similarity',
help='What type of similarity method should use')
# Spectral clustering
parser.add_argument('--norm_laplacian_k', type=int, default=5,
help='k in laplacian normalization and its eigen vector clustering')
parser.add_argument('--normalize_laplacian', action='store_true',
help='whether normalize laplacian or not')
# Kmeans
parser.add_argument('--kmeans_k', type=int, default=5,
help='number of clusters in kmeans')
# train
parser.add_argument('--n_epochs', type=int, default=100,
help='number of epochs')
parser.add_argument('--test_prc', type=float, default=0.1,
help='percentage for test dataset')
parser.add_argument('--graph_nodes', choices=['M','U'],
default='M',
help='the nodes to create graph was either movies or users')
"""
main function
"""
def main(args):
df, A, A_fill_zeros = read_preprocss_data(args)
print('done reading the data')
#===========================================================================
# # use a subset of data just for testing everything first
# nu=100 # number of users
# ni=200 # number of items
# A_temp = A.copy()
# data = A_temp[:nu,:ni] # small 10 X 20 submatrix
# print(data.shape)
#
# A_temp = A_fill_zeros.copy()
# data_fill_zeros = A_temp[:nu,:ni] # small 10 X 20 submatrix
data = A.copy()
data_fill_zeros = A_fill_zeros.copy()
print('data shape is:', data.shape)
print('data fill zero shape is:', data_fill_zeros.shape)
#===========================================================================
zero_nums = (np.sum((data_fill_zeros==0).astype(int)))
nonzero_nums = (np.sum((data_fill_zeros!=0).astype(int)))
sparsity = zero_nums / (zero_nums+nonzero_nums)
print('sparsity index of the data is', sparsity)
#===========================================================================
# STEP
#===========================================================================
n_k = [4, 5]
MSEs_train = np.zeros((args.n_epochs, len(n_k)))
RMSEs_train = np.zeros((args.n_epochs, len(n_k)))
MSEs_test = np.zeros((args.n_epochs, len(n_k)))
RMSEs_test = np.zeros((args.n_epochs, len(n_k)))
counts_corr_train = np.zeros((args.n_epochs, len(n_k)))
counts_corr_test = np.zeros((args.n_epochs, len(n_k)))
prc_correct_train = np.zeros((args.n_epochs, len(n_k)))
prc_correct_test = np.zeros((args.n_epochs, len(n_k)))
inds=np.nonzero(data_fill_zeros)
nn=inds[0].shape[0]
num_test = np.ceil(args.test_prc*nn).astype(int)
num_train = nn-num_test
for epch in range(args.n_epochs):
print('-------------\nEpochs %s starts\n-------------' %epch)
ir = np.random.permutation(nn)
inds0 = inds[0].copy()
inds1 = inds[1].copy()
tst_ind0 = np.asarray([inds0[ir[i]] for i in range(num_test)])
tst_ind1 = np.asarray([inds1[ir[i]] for i in range(num_test)])
tr_ind0 = np.asarray([inds0[ir[i+num_test]] for i in range(num_train)])
tr_ind1 = np.asarray([inds1[ir[i+num_test]] for i in range(num_train)])
tst_trget = data[tst_ind0, tst_ind1].copy()
train_data = data.copy()
print('train_data.shape', train_data.shape)
train_data[tst_ind0, tst_ind1] = 0
trn_trget = train_data[tr_ind0, tr_ind1].copy()
for ikk, kk in enumerate(n_k):
time_start=time.time()
print('k: ', kk)
print('ikk:', ikk)
U, sigmaTmp, Vt = svds(train_data, k = kk)
sigma = np.zeros([sigmaTmp.shape[0], sigmaTmp.shape[0]])
np.fill_diagonal(sigma, sigmaTmp)
pred_ratings = np.dot(np.dot(U, sigma), Vt)
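            # svds keeps only the k largest singular triplets, so this product is the
            # best rank-k (least-squares) approximation of train_data, i.e.
            # train_data ~ U @ diag(sigma) @ Vt, with the held-out test cells zeroed.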
print('pred_ratings time elapsed: {} sec'.format(time.time()-time_start))
err_tr = (pred_ratings[tr_ind0, tr_ind1] - trn_trget)**2
err_ts = (pred_ratings[tst_ind0, tst_ind1] - tst_trget)**2
diff_tr = (pred_ratings[tr_ind0, tr_ind1] - trn_trget)
incorrect_tr = np.nonzero(diff_tr)[0]
count_correct_tr = diff_tr.shape[0] - incorrect_tr.shape[0]
prc_correct_tr = count_correct_tr/diff_tr.shape[0]
counts_corr_train[epch, ikk] = count_correct_tr
prc_correct_train[epch, ikk] = prc_correct_tr
print('count correct train ', count_correct_tr)
print('percentage correct train ', prc_correct_tr)
diff_ts = (pred_ratings[tst_ind0, tst_ind1] - tst_trget)
incorrect_ts = np.nonzero(diff_ts)[0]
count_correct_ts = diff_ts.shape[0] - incorrect_ts.shape[0]
prc_correct_ts = count_correct_ts/diff_ts.shape[0]
counts_corr_test[epch, ikk] = count_correct_ts
prc_correct_test[epch, ikk] = prc_correct_ts
            print('count correct test ', count_correct_ts)
            print('percentage correct test ', prc_correct_ts)
MSE_tr = np.mean(err_tr)
RMSE_tr = np.sqrt(MSE_tr)
MSEs_train[epch, ikk] = MSE_tr
RMSEs_train[epch, ikk] = RMSE_tr
print('MSE train is:', MSE_tr)
print('RMSE train is:', RMSE_tr)
MSE_ts = np.mean(err_ts)
RMSE_ts = np.sqrt(MSE_ts)
MSEs_test[epch, ikk] = MSE_ts
RMSEs_test[epch, ikk] = RMSE_ts
print('MSE test is:', MSE_ts)
print('RMSE test is:', RMSE_ts)
if epch%50==0:
fn_str = args.RESULTPATH + 'mc_pred_rating_%s_%s_%s_epch%s.npy' \
%(args.fillnan, args.sim_method, args.test_prc, epch)
with open(fn_str, 'wb') as f:
pickle.dump(pred_ratings, f)
# Save errors
fn_str = args.RESULTPATH + 'mc_MSE_tr_%s_%s_%s_epch%s.npy' \
%(args.fillnan, args.sim_method, args.test_prc, epch)
with open(fn_str, 'wb') as f:
pickle.dump(MSEs_train, f)
fn_str = args.RESULTPATH + 'mc_RMSE_tr_%s_%s_%s_epch%s.npy' \
%(args.fillnan, args.sim_method, args.test_prc, epch)
with open(fn_str, 'wb') as f:
pickle.dump(RMSEs_train, f)
fn_str = args.RESULTPATH + 'mc_MSE_ts_%s_%s_%s_epch%s.npy' \
%(args.fillnan, args.sim_method, args.test_prc, epch)
with open(fn_str, 'wb') as f:
pickle.dump(MSEs_test, f)
fn_str = args.RESULTPATH + 'mc_RMSE_ts_%s_%s_%s_epch%s.npy' \
%(args.fillnan, args.sim_method, args.test_prc, epch)
with open(fn_str, 'wb') as f:
pickle.dump(RMSEs_test, f)
#
fn_str = args.RESULTPATH + 'mc_cnt_corr_tr_%s_%s_%s_epch%s.npy' \
%(args.fillnan, args.sim_method, args.test_prc, epch)
with open(fn_str, 'wb') as f:
pickle.dump(counts_corr_train, f)
fn_str = args.RESULTPATH + 'mc_cnt_corr_ts_%s_%s_%s_epch%s.npy' \
%(args.fillnan, args.sim_method, args.test_prc, epch)
with open(fn_str, 'wb') as f:
pickle.dump(counts_corr_test, f)
fn_str = args.RESULTPATH + 'mc_prc_corr_tr_%s_%s_%s_epch%s.npy' \
%(args.fillnan, args.sim_method, args.test_prc, epch)
with open(fn_str, 'wb') as f:
pickle.dump(prc_correct_train, f)
fn_str = args.RESULTPATH + 'mc_prc_corr_ts_%s_%s_%s_epch%s.npy' \
%(args.fillnan, args.sim_method, args.test_prc, epch)
with open(fn_str, 'wb') as f:
pickle.dump(prc_correct_test, f)
print('saving in matrix completion is done')
"""
==============================================================================
Main
==============================================================================
"""
if __name__ == '__main__':
args=parser.parse_args()
print('-------Arguments:---------')
print(args)
print('--------------------------')
main(args)
print('DONE!!!')
|
nilq/baby-python
|
python
|
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
import smcat.common
def serializeDateTime(dt):
return smcat.common.datetimeToJsonStr(dt)
class DocumentItem(scrapy.Item):
"""
Attributes:
id: A unique identifier for this item. Not idempotent with subsequent harvests.
kind: type name of this item
time_retrieved: When the item was generated
source: URL of the document leading to this item
url: URL of the document this item is about
time_mod: Timestamp reported in HTTP response Last-Modified header, if available
"""
id = scrapy.Field()
kind = scrapy.Field()
from_item = scrapy.Field()
time_retrieved = scrapy.Field(serializer=serializeDateTime)
source = scrapy.Field()
url = scrapy.Field()
time_mod = scrapy.Field(serializer=serializeDateTime)
def __init__(self):
super().__init__()
self.set(kind=self.name())
self.set(id=smcat.common.getId())
def setV(self, k, v, allow_none=False):
if v is None and not allow_none:
return
self[k] = v
def set(self, allow_none=False, **kwargs):
for k, v in kwargs.items():
if v is None and not allow_none:
continue
self[k] = v
def name(self):
return self.__class__.__name__
class RobotstxtItem(DocumentItem):
"""
Describes a robots.txt document
"""
pass
class SitemapItem(DocumentItem):
"""
Describes a sitemap.xml document
"""
pass
class SitemaplocItem(DocumentItem):
"""
Properties of a document identified by a sitemap loc entry.
Attributes:
time_loc: Timestamp in sitemap lastmod value, if available
link_type: Type value from link, if available
link_profile: Profile value from link, if available
changefreq: String value of the changefreq element, if available
priority: Value of the priority element, if available
"""
time_loc = scrapy.Field(serializer=serializeDateTime)
link_type = scrapy.Field()
link_profile = scrapy.Field()
changefreq = scrapy.Field()
priority = scrapy.Field()
class JsonldItem(DocumentItem):
    """
    JSON-LD content retrieved from a URL.
    """
data = scrapy.Field()
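# Minimal usage sketch (illustrative only): fields are filled through the
# None-skipping helpers rather than direct item assignment, e.g.
#   item = SitemaplocItem()
#   item.set(url='https://example.org/sitemap.xml', changefreq=None)  # changefreq is skipped
#   item.setV('priority', 0.5)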
|
nilq/baby-python
|
python
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import os
from airflow import configuration
from airflow import models
from airflow.contrib.hooks import gcs_hook
from airflow.contrib.operators import mssql_to_gcs
from airflow.operators import python_operator
from airflow.utils.trigger_rule import TriggerRule
from airflow.operators import email_operator
from google.cloud import storage
client = storage.Client()
bucket = client.get_bucket('us-central1-shared-logic-en-4c9cc71e-bucket')
blob = storage.Blob('dags/sql_queries/mrrecvh_mrrecvd_ppprice_lj.sql', bucket)
# We set the start_date of the DAG to the previous date. This will
# make the DAG immediately available for scheduling.
YESTERDAY = datetime.datetime.combine(
datetime.datetime.today() - datetime.timedelta(1),
datetime.datetime.min.time())
# We define some variables that we will use in the DAG tasks.
SUCCESS_TAG = 'success'
FAILURE_TAG = 'failure'
DATE = '{{ ds }}'
DEFAULT_DAG_ARGS = {
'start_date': YESTERDAY,
'retries': 0,
'project_id': models.Variable.get('gcp_project')
}
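# download_as_string is a method that returns bytes; call it and decode so the
# export operator below receives the SQL text itself.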
query1 = blob.download_as_string().decode('utf-8')
with models.DAG(dag_id='mssql_gcs_dataflow_bigquery_dag_1',
description='A DAG triggered by an external Cloud Function',
schedule_interval=None, default_args=DEFAULT_DAG_ARGS) as dag:
# Export task that will process SQL statement and save files to Cloud Storage.
export_sales_orders = mssql_to_gcs.MsSqlToGoogleCloudStorageOperator(
task_id='mrrecvh_mrrecvd_ppprice_lj',
sql=query1,
bucket=models.Variable.get('mssql_export_bucket'),
filename=DATE + '-export.json',
mssql_conn_id='shapiro-sql',
dag=dag
)
# Here we create two conditional tasks, one of which will be executed
# based on whether the export_sales_orders was a success or a failure.
success_move_task = email_operator.EmailOperator(task_id='success',
trigger_rule=TriggerRule.ALL_SUCCESS,
to=models.Variable.get('email'),
subject='mssql_gcs_dataflow_bigquery_dag_1 Job Succeeded: start_date {{ ds }}',
html_content="HTML CONTENT"
)
failure_move_task = email_operator.EmailOperator(task_id='failure',
trigger_rule=TriggerRule.ALL_FAILED,
to=models.Variable.get('email'),
subject='mssql_gcs_dataflow_bigquery_dag_1 Job Failed: start_date {{ ds }}',
html_content="HTML CONTENT"
)
# The success_move_task and failure_move_task are both downstream from the
# dataflow_task.
export_sales_orders >> success_move_task
export_sales_orders >> failure_move_task
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#! \file ~/doit_doc_template/templates/base/library/type_page.py
#! \author Jiří Kučera, <sanczes AT gmail.com>
#! \stamp 2019-07-04 09:41:22 +0200
#! \project DoIt! Doc: Sphinx Extension for DoIt! Documentation
#! \license MIT
#! \version See doit_doc_template.__version__
#! \brief See __doc__
#
"""\
Page type.\
"""
__license__ = """\
Copyright (c) 2014 - 2019 Jiří Kučera.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.\
"""
from sphinx.errors import ExtensionError
from doit_doc_template.core.errors import BadTypeError
from doit_doc_template.core.utils import simplerep
class PageStackError(ExtensionError):
"""
"""
message = "Page element stack corrupted: {}."
bad_mark_message = "Element '{}' is not identical with mark '{}'"
no_mark_message = "Hitting the stack bottom while waiting for '{}' mark"
__slots__ = []
    def __init__(self, detail):
        """
        Initialize the error with a formatted detail message.
        """
        ExtensionError.__init__(self, self.message.format(detail))
    #-def
    @classmethod
    def bad_mark(cls, elem, mark):
        """
        Build the error raised when `elem` is not identical with `mark`.
        """
        return cls(cls.bad_mark_message.format(simplerep(elem), simplerep(mark)))
    #-def
    @classmethod
    def no_mark(cls, mark):
        """
        Build the error raised when `mark` is never found on the stack.
        """
        return cls(cls.no_mark_message.format(simplerep(mark)))
    #-def
#-class
class Page(object):
"""
"""
__slots__ = ["urimap", "pending_labels", "stack"]
def __init__(self):
"""
"""
self.urimap = {}
self.pending_labels = []
self.stack = []
#-def
def adduri(self, name, uri):
"""
"""
self.urimap[name] = uri
#-def
def pushlabel(self, label):
"""
"""
self.pending_labels.append(label)
#-def
def pushmark(self, mark):
"""
"""
self.stack.append(mark)
#-def
def popmark(self, mark, markcls):
"""
"""
result = []
while self.stack:
elem = self.stack.pop()
if elem is mark:
return result
if isinstance(elem, markcls):
raise PageStackError.bad_mark(elem, mark)
result.append(elem)
raise PageStackError.no_mark(mark)
#-def
#-class
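# Minimal usage sketch of the mark/stack protocol (illustrative only):
#
#   class Mark: pass
#   mark = Mark()
#   page = Page()
#   page.pushmark(mark)
#   page.pushmark('child-a')
#   page.pushmark('child-b')
#   page.popmark(mark, Mark)   # -> ['child-b', 'child-a']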
def type_page(param, obj):
"""
"""
if not isinstance(obj, Page):
raise BadTypeError(param, obj, Page)
return obj
#-def
|
nilq/baby-python
|
python
|
import sys
from .commands import main
sys.exit(main())
|
nilq/baby-python
|
python
|
import logging
from hearthstone.battlebots.priority_storage_bot import priority_st_ad_tr_bot
from hearthstone.battlebots.random_bot import RandomBot
from hearthstone.host import RoundRobinHost
def main():
logging.basicConfig(level=logging.DEBUG)
host = RoundRobinHost({"random_action_bot":RandomBot(2),
"my_bot":priority_st_ad_tr_bot(1)
})
host.play_game()
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
BUMP_LIMIT = 20
THREAD_LIMIT = 5
SQL_CONST_OP = 0
MAX_FILE_SIZE = 1 << 21 # 2 MB
MAX_OP_IMG_WH = 250
MAX_IMG_WH = 150
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif', 'tiff', 'bmp'])
MAX_POST_LEN = 5000
class FlaskRestConf(object):
RESTFUL_JSON = {'default': str}
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import glob
import os
import shlex
import sys
import platform
script_dir = os.path.dirname(__file__)
jc3_handling_editor_root = os.path.normpath(os.path.join(script_dir, os.pardir))
sys.path.insert(0, os.path.abspath(os.path.join(jc3_handling_editor_root, 'tools')))
sys.path.insert(0, os.path.join(jc3_handling_editor_root, 'tools', 'gyp', 'pylib'))
import gyp
def run_gyp(args):
rc = gyp.main(args)
if rc != 0:
        print('Error running GYP')
sys.exit(rc)
if __name__ == '__main__':
args = sys.argv[1:]
# GYP bug.
if sys.platform == 'win32':
args.append(os.path.join(jc3_handling_editor_root, 'jc3_handling_editor.gyp'))
standalone_fn = os.path.join(jc3_handling_editor_root, 'standalone.gypi')
toolchain_fn = os.path.join(jc3_handling_editor_root, 'toolchain.gypi')
common_fn = os.path.join(jc3_handling_editor_root, 'common.gypi')
options_fn = os.path.join(jc3_handling_editor_root, 'config.gypi')
else:
args.append(os.path.join(os.path.abspath(jc3_handling_editor_root), 'jc3_handling_editor.gyp'))
standalone_fn = os.path.join(os.path.abspath(jc3_handling_editor_root), 'standalone.gypi')
toolchain_fn = os.path.join(os.path.abspath(jc3_handling_editor_root), 'toolchain.gypi')
common_fn = os.path.join(os.path.abspath(jc3_handling_editor_root), 'common.gypi')
options_fn = os.path.join(os.path.abspath(jc3_handling_editor_root), 'config.gypi')
if os.path.exists(standalone_fn):
args.extend(['-I', standalone_fn])
if os.path.exists(toolchain_fn):
args.extend(['-I', toolchain_fn])
if os.path.exists(common_fn):
args.extend(['-I', common_fn])
if os.path.exists(options_fn):
args.extend(['-I', options_fn])
args.append('--depth=' + jc3_handling_editor_root)
#args.append('-Dcomponent=shared_library')
#args.append('-Dlibrary=shared_library')
gyp_args = list(args)
    print(os.environ.get('GYP_GENERATORS'))
gyp_generators = os.environ.get('GYP_GENERATORS')
#if platform.system() == 'Linux' and gyp_generators != 'ninja':
# --generator-output defines where the Makefile goes.
gyp_args.append('--generator-output=out')
# -Goutput_dir defines where the build output goes, relative to the
# Makefile. Set it to . so that the build output doesn't end up in out/out.
gyp_args.append('-Goutput_dir=.')
run_gyp(gyp_args)
|
nilq/baby-python
|
python
|
from DownloadData import DownloadData, UnzipData
DownloadData()
UnzipData()
|
nilq/baby-python
|
python
|
import argparse
from getpass import getpass
from classes.Application import Application
if __name__ == "__main__":
CONFIG_PATH = "./config/config.yaml"
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='function')
# Create accounts parser
parser_create_accounts = subparsers.add_parser('create_accounts')
parser_create_accounts.add_argument('-n', '--number', type=int, help='Number of accounts to create.', required=True)
parser_create_accounts.add_argument('-p', '--password', help='Password for keyfiles.')
parser_create_accounts.add_argument('-d', '--directory', help='Directory where keyfiles will be generated.',
default='./accounts/')
# Dispatch currency parser
parser_dispatch_currency = subparsers.add_parser('dispatch_currency')
parser_dispatch_currency.add_argument('-a', '--amount', type=float, help='Amount of tokens to send to each address.', required=True)
parser_dispatch_currency.add_argument('-f', '--from_address', help='Address which will send tokens.', required=True)
parser_dispatch_currency.add_argument('-b', '--blockchain',
help='Blockchain name where transactions will be made '
'(see config file).',
required=True)
parser_dispatch_currency.add_argument('-p', '--password', help='Password of sender address keyfile.', required=True)
parser_dispatch_currency.add_argument('-k', '--keys_dir', help='Directory where keyfiles are located.',
default='./accounts/')
# Extract transactions parser
parser_extract_transactions = subparsers.add_parser('extract_transactions')
parser_extract_transactions.add_argument('-a', '--address',
help='Address from which transaction have to be extracted.',
required=True)
parser_extract_transactions.add_argument('-b', '--blockchains',
help='Blockchains names from which transactions have to be extracted '
'(see config file), separated by commas.',
required=True)
# Farm parser
farm = subparsers.add_parser('farm')
farm.add_argument('-p', '--password', help='Password of keyfiles.', required=True)
farm.add_argument('-b', '--blockchains',
help='Blockchain names from which transactions have to be extracted '
'(see config file), separated by commas.',
required=True)
    farm.add_argument('-P', '--playbook', help='Playbook file containing transactions and blockchains (generated with '
                      'the extract_transactions function).', required=True)
farm.add_argument('-k', '--keys_dir', help='Directory where keyfiles are located.',
default='./accounts/')
args = parser.parse_args()
if args.function == 'create_accounts':
application = Application(CONFIG_PATH, args.directory)
if not args.password:
try:
password = getpass(prompt='Enter a password for keyfiles: ')
application.create_accounts(args.number, args.directory, password)
except Exception as error:
print('ERROR', error)
else:
application.create_accounts(args.number, args.directory, args.password)
elif args.function == 'extract_transactions':
application = Application(CONFIG_PATH)
application.extract_transactions_from_address(args.address, args.blockchains.split(','))
elif args.function == 'dispatch_currency':
application = Application(CONFIG_PATH, args.keys_dir)
application.dispatch_currency(args.amount, args.from_address, args.blockchain, args.password)
elif args.function == 'farm':
application = Application(CONFIG_PATH, args.keys_dir)
application.farm(args.password, args.playbook, args.blockchains.split(','))
|
nilq/baby-python
|
python
|
from django.db import models
from django.contrib.auth.models import User
import uuid
# Question user
class Quser(models.Model):
id= models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
email = models.EmailField(unique=True, null=True)
profile_image = models.ImageField(upload_to='profile/', null=True)
first_name = models.CharField(max_length=200, null=True)
    last_name = models.CharField(max_length=200, null=True)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.email
|
nilq/baby-python
|
python
|
## @file
## @brief metacircular implementation in metaL/py
## @defgroup circ Metacircular
## @brief `implementation in metaL/py`
## @{
from metaL import *
## `<module:metaL>` reimplements itself using host VM metainfo
MODULE = vm['MODULE']
## `~/metaL/$MODULE` target directory for code generation
diroot = Dir(MODULE)
vm['dir'] = diroot
## file masks will be ignored by `git` version manager
gitignore = pygIgnore('.gitignore')
vm['gitignore'] = gitignore
diroot // gitignore
gitignore.sync()
## `Makefile` for target project build/run
mk = pyMakefile()
vm['mk'] = mk
diroot // mk
mk // Section(MODULE)
mk.sync()
print(vm)
## @}
|
nilq/baby-python
|
python
|
# coding=utf-8
"""Provides utilities for serialization/deserialization of
Tempo data types.
"""
from six import string_types
from rest_framework import serializers
from tempo.recurrenteventset import RecurrentEventSet
# pylint: disable=no-init,no-self-use,no-member
class RecurrentEventSetField(serializers.Field):
"""Representation of RecurrentEventSet."""
default_error_messages = {
'incorrect_type': 'Incorrect type. Expected a string or list/tuple, '
'but got {input_type}',
'incorrect_format': 'Incorrect format.',
}
# noinspection PyMethodMayBeStatic
def to_representation(self, obj):
return obj.to_json()
def to_internal_value(self, data):
# pylint: disable=missing-docstring
if not isinstance(data, (string_types, list, tuple)):
self.fail('incorrect_type', input_type=type(data).__name__)
if not RecurrentEventSet.validate_json(data):
self.fail('incorrect_format')
return RecurrentEventSet.from_json(data)
|
nilq/baby-python
|
python
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2018–2021 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from mergify_engine import config
from mergify_engine import context
from mergify_engine.tests.functional import base
class TestUpdateAction(base.FunctionalTestBase):
async def test_update_action(self):
rules = {
"pull_request_rules": [
{
"name": "update",
"conditions": [f"base={self.main_branch_name}"],
"actions": {"update": {}},
},
{
"name": "merge",
"conditions": [f"base={self.main_branch_name}", "label=merge"],
"actions": {"merge": {}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
commits = await self.get_commits(p2["number"])
assert len(commits) == 1
await self.add_label(p1["number"], "merge")
await self.run_engine()
await self.wait_for("pull_request", {"action": "closed"})
p1 = await self.get_pull(p1["number"])
assert p1["merged"]
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.run_engine()
commits = await self.get_commits(p2["number"])
assert len(commits) == 2
assert commits[-1]["commit"]["author"]["name"] == config.BOT_USER_LOGIN
assert commits[-1]["commit"]["message"].startswith("Merge branch")
async def test_update_action_on_closed_pr_deleted_branch(self):
rules = {
"pull_request_rules": [
{
"name": "update",
"conditions": [f"base={self.main_branch_name}"],
"actions": {"update": {}},
},
{
"name": "merge",
"conditions": [f"base={self.main_branch_name}", "label=merge"],
"actions": {"merge": {}, "delete_head_branch": {}},
},
]
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
commits = await self.get_commits(p2["number"])
assert len(commits) == 1
await self.add_label(p1["number"], "merge")
await self.run_engine()
p1 = await self.get_pull(p1["number"])
assert p1["merged"]
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.run_engine()
commits = await self.get_commits(p2["number"])
assert len(commits) == 2
assert commits[-1]["commit"]["author"]["name"] == config.BOT_USER_LOGIN
assert commits[-1]["commit"]["message"].startswith("Merge branch")
# Now merge p2 so p1 is not up to date
await self.add_label(p2["number"], "merge")
await self.run_engine()
ctxt = await context.Context.create(self.repository_ctxt, p1, [])
checks = await ctxt.pull_engine_check_runs
for check in checks:
assert check["conclusion"] == "success", check
|
nilq/baby-python
|
python
|
import GPyOpt
import chaospy
import matplotlib
import math
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
np.set_printoptions(linewidth=200, precision=4)
def equation(x, selection_index):
target_region = {'x': (0, 1), 'y': (0, 1)}
def function(selection_index, h=1): #1 is just a dummy value
if selection_index == 1:
f = math.sin(h) + math.sin(10 * h / 3)
region_of_interest = {'x': (2.7, 7.5), 'y': (-2, 1)}
if selection_index == 2:
f = - (16 * h ** 2 - 24 * h + 5) * math.e ** -h
region_of_interest = {'x': (1.9, 3.9), 'y': (-4, -2.4)}
if selection_index == 3:
f = - (1.4 - 3 * h) * math.sin(18 * h)
region_of_interest = {'x': (0, 1.2), 'y': (-1.5, 2.5)}
if selection_index == 4:
f = - (h + math.sin(h)) * math.e ** - (h ** 2)
region_of_interest = {'x': (-10, 10), 'y': (-1, 1)}
if selection_index == 5:
f = math.sin(h) + math.sin(10 * h / 3) + math.log(h) - 0.84 * h + 3
region_of_interest = {'x': (2.7, 7.5), 'y': (-2, 3)}
if selection_index == 6:
f = - h * math.sin(h)
region_of_interest = {'x': (0, 10), 'y': (-8, 6)}
if selection_index == 7:
f = math.sin(h) ** 3 + math.cos(h) ** 3
region_of_interest = {'x': (0, 2 * math.pi), 'y': (-1, 1)}
if selection_index == 8:
f = - h ** (2 / 3) - (1 - h ** 2) ** (1 / 3)
region_of_interest = {'x': (0.001, 0.99), 'y': (-1.6, -1)}
if selection_index == 9:
f = - (math.e ** (-h)) * math.sin(2 * math.pi * h)
region_of_interest = {'x': (0, 4), 'y': (-0.8, 0.6)}
if selection_index == 10:
f = (h ** 2 - 5 * h + 6) / (h ** 2 + 1)
region_of_interest = {'x': (-5, 5), 'y': (-1, 8)}
return f, region_of_interest
_, region_of_interest = function(selection_index)
x_translate = target_region['x'][0] - region_of_interest['x'][0]
y_translate = target_region['y'][0] - region_of_interest['y'][0]
x_squeeze = (target_region['x'][1] - target_region['x'][0]) / (region_of_interest['x'][1] - region_of_interest['x'][0])
y_squeeze = (target_region['y'][1] - target_region['y'][0]) / (region_of_interest['y'][1] - region_of_interest['y'][0])
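    # Affine remap: pull x from the target region back into the function's
    # region of interest (h), evaluate f there, then push the value into the
    # target y-range; x_squeeze and y_squeeze are ratios of interval lengths.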
h = x / x_squeeze - x_translate
j, _ = function(selection_index, h)
y = (j + y_translate) * y_squeeze
return y
def plot_evaluated_points(X, Y, X_design, Y_design, x_minimum=0, y_minimum=0):
title = 'Evaluations for Mixed-variable Balance Case'
num_discrete = 10
label_color = 'midnightblue'
fig_mixed = matplotlib.pyplot.figure(figsize=(10, 5))
ax_mixed = fig_mixed.add_subplot(1, 1, 1)
ax_mixed.set_title(title, fontweight = 550, fontsize = 'large')
resolution = 100
xyz = np.ones((resolution * num_discrete, 3))
for index in range(num_discrete):
start = index * resolution
end = (index + 1) * resolution
xyz[start:end, 0] = np.linspace(0, 1, resolution)
xyz[start:end, 1] *= index + 1
xyz[start:end, 2] = np.asarray([equation(x[0], x[1]) for x in xyz[start:end, [0, 1]]]).reshape(resolution)
# ax_mixed.plot(xs = xyz[start:end, 0], ys = xyz[start:end, 1], zs = xyz[start:end, 2])
X_surface = xyz[:, 0]
Y_surface = xyz[:, 1]
X_surface, Y_surface = np.meshgrid(X_surface, Y_surface)
XY_ravel = np.append(X_surface.ravel()[:, np.newaxis], Y_surface.ravel()[:, np.newaxis], axis=1)
Z_surface = np.asarray([equation(x[0], x[1]) for x in XY_ravel]).reshape(X_surface.shape)
#ax_mixed.plot_surface(X_surface, Y_surface, Z_surface,
# cmap=matplotlib.cm.plasma, linewidth=1)
contour = ax_mixed.contourf(X_surface, Y_surface, Z_surface, cmap=matplotlib.cm.viridis)
fig_mixed.colorbar(contour, ax=ax_mixed)
X_acquisition = np.delete(X, list(range(X_design.shape[0])), axis = 0)
Y_acquisition = np.delete(Y, list(range(Y_design.shape[0])), axis = 0)
size = np.linspace(100, 10, X_acquisition.shape[0])
ax_mixed.scatter(x=X_design[:, 0], y=X_design[:, 1], c='firebrick', marker='o', s=100)
ax_mixed.scatter(x=X_acquisition[:, 0], y=X_acquisition[:, 1], c='orange', marker=7, s=size)
ax_mixed.scatter(x = X[np.argmin(Y), 0], y = X[np.argmin(Y), 1], c='crimson', marker = 'x', s=200)
ax_mixed.scatter(x = x_minimum, y = y_minimum, c='black', marker = '*', s=200)
ax_mixed.set_xlabel('x-value', color = label_color)
ax_mixed.set_ylabel('Selection index', color = label_color)
design = matplotlib.lines.Line2D([], [], color = 'firebrick', linestyle='None', marker = 'o', markersize = 10, label = 'design points')
acquisition = matplotlib.lines.Line2D([], [], color = 'orange', linestyle='None', marker = 7, markersize = 10, label = 'acquisitions')
located_optimum = matplotlib.lines.Line2D([], [], color = 'crimson', linestyle='None', marker = 'x', markersize = 10, label = 'located optimum')
actual_optimum = matplotlib.lines.Line2D([], [], color = 'black', linestyle='None', marker = '*', markersize = 10, label = 'actual optimum')
ax_mixed.legend(handles = [design, acquisition, located_optimum, actual_optimum], loc = 'best', shadow = True)
fig_mixed.tight_layout(pad=0.35, w_pad=0.5, h_pad=2.5)
return None
def compare_with_actual(problem, variables):
continuous_bounds = variables[0]['domain']
discrete_levels = variables[1]['domain']
fig = matplotlib.pyplot.figure(figsize=(10, 5 * len(discrete_levels)))
ax = [None for n in range(2*len(discrete_levels))]
label_color = 'midnightblue'
plot = 0
x1_continuous = np.linspace(continuous_bounds[0], continuous_bounds[1], 1000)
for x2_discrete in discrete_levels:
Y_actual = []
Y_metamodel = []
for x1 in x1_continuous:
X = np.asarray([x1, x2_discrete])
mv = problem.model.predict(X)
Y_a = equation(x1, x2_discrete)
Y_m = np.asarray(mv).reshape(2)[0]
Y_actual.append(Y_a)
Y_metamodel.append(Y_m)
ax[plot] = fig.add_subplot(len(discrete_levels), 2, plot+1)
title = f'Discrete value #{x2_discrete} (Actual)'
ax[plot].set_title(title, fontweight = 550, fontsize = 'large')
ax[plot].plot(x1_continuous, Y_actual, 'b')
ax[plot].set_xlabel('x-position', color = label_color)
ax[plot].set_ylabel('Distance (to minimize)', color = label_color)
plot += 1
ax[plot] = fig.add_subplot(len(discrete_levels), 2, plot+1)
title = f'Discrete value #{x2_discrete} (Predicted)'
ax[plot].set_title(title, fontweight = 550, fontsize = 'large')
ax[plot].plot(x1_continuous, Y_metamodel, 'b')
ax[plot].set_xlabel('x-position', color = label_color)
ax[plot].set_ylabel('Distance (to minimize)', color = label_color)
plot += 1
fig.tight_layout(pad=0.35, w_pad=0.5, h_pad=3.5)
return None
def plot_convergence(Y_data):
X = [x for x in range(1, len(Y_data)+1)]
Y = [y for y in Y_data]
convergence_fig = matplotlib.pyplot.figure(figsize=(10, 5))
ax = convergence_fig.add_subplot(1, 1, 1)
title = 'Convergence Plot'
ax.set_title(title, fontweight = 550, fontsize = 'large')
ax.plot(X, Y, 'b', marker='o')
ax.set_xlabel('Batch Iteration')
ax.set_ylabel('Objective Value')
return None
def generate_experimental_design(num_design):
print('Generating experimental design...\n')
    hammersley = chaospy.distributions.sampler.sequences.hammersley
    base = hammersley.create_hammersley_samples(num_design, dim=2, burnin=-1, primes=()) #numpy array
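    # Hammersley samples lie in [0, 1]; row 0 feeds the continuous variable
    # directly and row 1 is scaled and rounded onto the discrete levels 1..10.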
x = (base[0, :] * 1).tolist()
selection_index = np.rint(base[1, :] * 9 + 1).astype(int).tolist()
design = np.asarray([[x[design], selection_index[design]] for design in range(num_design)])
return design
space_mixed_variables = \
[{'name': 'x', 'type': 'continuous', 'domain':(0,1)},
{'name': 'selection_index', 'type': 'discrete', 'domain': (1,2,3,4,5,6,7,8,9,10)}]
#space_mixed = GPyOpt.core.task.space.Design_space(space_mixed_variables)
#experiment_design_mixed_X = GPyOpt.experiment_design.LatinMixedDesign(space_mixed).get_samples(20)
experiment_design_mixed_X = generate_experimental_design(200)
experiment_design_mixed_Y = []
for x, selection_index in experiment_design_mixed_X:
Y = equation(x, selection_index)
experiment_design_mixed_Y.append([Y])
experiment_design_mixed_Y = np.asarray(experiment_design_mixed_Y)
#plot_experiment_design_mixed(experiment_design_mixed_X)
X_values_mixed = experiment_design_mixed_X
Y_values_mixed = experiment_design_mixed_Y
numIterations_mixed = 1
X_initial_values_mixed = X_values_mixed
Y_initial_values_mixed = Y_values_mixed
X_initial_best = X_values_mixed[np.argmin(Y_values_mixed)]
Y_initial_best = Y_values_mixed[np.argmin(Y_values_mixed)]
best_x = []
best_fx = []
for step in range(numIterations_mixed):
mixed_problem = GPyOpt.methods.BayesianOptimization(
f = None,
domain = space_mixed_variables,
constraints = None,
cost_withGradients = None,
model_type = 'GP',
X = X_values_mixed,
Y = Y_values_mixed,
acquisition_type = 'EI',
normalize_Y = True,
exact_feval = False,
acquisition_optimizer_type = 'lbfgs',
evaluator_type = 'local_penalization',
batch_size = 1,
maximize = False,
de_duplication = True,
Gower = True,
noise_var = 0)
x_next_mixed = mixed_problem.suggest_next_locations()
y_next_mixed = []
for x, selection_index in x_next_mixed:
Y = equation(x, selection_index)
y_next_mixed.append([Y])
y_next_mixed = np.asarray(y_next_mixed)
X_values_mixed = np.vstack((X_values_mixed, x_next_mixed))
Y_values_mixed = np.vstack((Y_values_mixed, y_next_mixed))
    print(f'Iteration {step+1}') # This never seems to print before the acquisition plot, and the last print does not appear
mixed_problem.plot_acquisition()
print(f'New location/s: {[tuple(point) for point in x_next_mixed]}\n')
mixed_problem._compute_results()
#mixed_problem.plot_convergence()
best_x.append(mixed_problem.x_opt)
best_fx.append(mixed_problem.fx_opt)
best_x = np.asarray(best_x)
best_fx = np.asarray(best_fx)
plot_evaluated_points(X_values_mixed, Y_values_mixed, X_initial_values_mixed, Y_initial_values_mixed)
compare_with_actual(problem = mixed_problem, variables = space_mixed_variables)
print('X_initial_best', X_initial_best)
print('Y_initial_best', Y_initial_best)
print('Located optimum:', mixed_problem.x_opt)
print('Value:', mixed_problem.fx_opt)
plot_convergence(best_fx)
#These can be used to compare with x_opt and fx_opt to check consistency.
#print('Located optimum:', X_values_mixed[np.argmin(Y_values_mixed)])
#print('Value:', Y_values_mixed[np.argmin(Y_values_mixed)])
#print('Actual optimum:', [1, weights[0].index(min(weights[0]))])
#print('Value:', balance(np.asarray([1, weights[0].index(min(weights[0]))]).reshape(1, 2), weights))
mixed_problem.plot_convergence()
|
nilq/baby-python
|
python
|
"""Used to plan actions by comparing what is live and what is defined locally.
.. note:: Currently only supported for `AWS CDK`_, `CloudFormation`_,
`Terraform`_, and `Troposphere`_.
When run, the environment is determined from the current git branch
unless ``ignore_git_branch: true`` is specified in the
:ref:`Runway config file<runway-config>`. If the ``DEPLOY_ENVIRONMENT``
environment variable is set, its value will be used. If neither the git
branch nor the environment variable is available, the directory name is used.
The environment identified here is used to determine the env/config files
to use. It is also used with options defined in the Runway config file
such as ``assume_role``, ``account_id``, etc. See
:ref:`Runway Config<runway-config>` for details on these options.
The user will be prompted to select which
:ref:`deployment(s)<runway-deployment>` and
:ref:`module(s)<runway-module>` to process unless there is only one
:ref:`deployment<runway-deployment>` and/or
:ref:`module<runway-module>`, the environment variable ``CI`` is set,
or the ``--tag <tag>...`` option is provided. In that case, the
:ref:`deployment(s)<runway-deployment>` and :ref:`module(s)<runway-module>`
will be processed in sequence, in the order they are defined.
.. rubric:: Options
+--------------------+-------------------------------------------------+
| ``--tag <tag>...`` | | Select modules for processing by tag or tags. |
| | This option can be specified |
| | | more than once to build a list of tags that |
| | are treated as "AND". |
| | | (ex. ``--tag <tag1> --tag <tag2>`` would |
| | select all modules with BOTH tags). |
+--------------------+-------------------------------------------------+
.. rubric:: Equivalent To
These are the native commands that are used:
- ``cdk diff`` - https://docs.aws.amazon.com/cdk/latest/guide/tools.html
- ``stacker diff`` -
https://stacker.readthedocs.io/en/stable/commands.html#diff
- ``terraform plan`` - https://www.terraform.io/docs/commands/plan.html
.. rubric:: Example
.. code-block:: shell
$ runway plan
"""
from ..modules_command import ModulesCommand
class Plan(ModulesCommand):
"""Extend ModulesCommand with execute to run the plan method."""
def execute(self):
"""Generate plans."""
self.run(deployments=None, command='plan')
|
nilq/baby-python
|
python
|
import logging
import subprocess
import mlflow
import mlflow.deployments.cli
import pandas as pd
import requests
from mlflow.models.signature import infer_signature
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score
from sklearn.pipeline import Pipeline
from dataset import Dataset
from src.conf import Conf
from src.mlflow_pyfunc import MlflowPyfunc
CONF = Conf()
class MLOps:
def __init__(self):
self.mlflow_conda = {
"channels": ["defaults"],
"name": "conda",
"dependencies": [
"python=3.8",
"pip",
{"pip": ["mlflow", "scikit-learn", "cloudpickle", "pandas", "numpy"]},
],
}
def mlflow_eval_and_log(
self, model_pipeline: Pipeline, validation_data: pd.DataFrame
) -> str:
valid_x = validation_data.drop(columns=CONF.col_label)
y_pred = model_pipeline.predict(valid_x)
with mlflow.start_run():
mlflow.log_metric(
"accuracy",
accuracy_score(validation_data[CONF.col_label].values, y_pred),
)
mlflow.log_metric(
"precison",
precision_score(validation_data[CONF.col_label].values, y_pred),
)
mlflow.log_metric(
"recall", recall_score(validation_data[CONF.col_label].values, y_pred)
)
mlflow.log_metric(
"roc_auc", roc_auc_score(validation_data[CONF.col_label].values, y_pred)
)
signature = infer_signature(valid_x, y_pred)
mlflow.pyfunc.log_model(
artifact_path="model",
python_model=MlflowPyfunc(model=model_pipeline),
conda_env=self.mlflow_conda,
signature=signature,
)
            mlflow.sklearn.log_model(
                artifact_path="model_sklearn",  # distinct path so the pyfunc and sklearn flavors do not collide
                sk_model=model_pipeline,
                conda_env=self.mlflow_conda,
                signature=signature,
            )
run = mlflow.active_run()
run_id = run.info.run_id
logging.info("Active run_id: {}".format(run_id))
return run_id
@staticmethod
def mlflow_serve(run_id: str):
bash_command = (
f"mlflow models serve -m {CONF.path_mlflow}/{run_id}/artifacts/model/"
)
logging.info(f"running bash_command: $ {bash_command}")
process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print(output, error)
return output, error
@staticmethod
def mlflow_inference(data: pd.DataFrame) -> str:
host = "localhost"
port = "5000"
url = f"http://{host}:{port}/invocations"
headers = {
"Content-Type": "application/json",
}
feats = Dataset.get_feat_list(data)
data_x = data[feats]
http_data = data_x.to_json(orient="split")
r = requests.post(url=url, headers=headers, data=http_data)
print(f"Predictions: {r.text}")
return r.text
|
nilq/baby-python
|
python
|
'''LC1460: Make Two Arrays Equal by Reversing Sub-arrays
https://leetcode.com/problems/make-two-arrays-equal-by-reversing-sub-arrays/
Given two integer arrays of equal length target and arr.
In one step, you can select any non-empty sub-array
of arr and reverse it. You are allowed to make any
number of steps.
Return True if you can make arr
equal to target, or False otherwise
Example 1:
Input: target = [1,2,3,4], arr = [2,4,1,3]
Output: true
Example 2:
Input: target = [7], arr = [7]
Output: true
Explanation: arr is equal to target without any reverses.
Example 3:
Input: target = [1,12], arr = [12,1]
Output: true
Example 4:
Input: target = [3,7,9], arr = [3,7,11]
Output: false.
Example 5:
Input: target = [1,1,1,1,1], arr = [1,1,1,1,1]
Output: true'''
class Sln(object):
def canBeEqual(self, target, arr):
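        # Reversing a length-2 subarray swaps adjacent elements, so repeated
        # reversals can realize any permutation of arr. Hence arr can reach
        # target iff both are equal as multisets: sort and compare, O(n log n).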
arr.sort()
target.sort()
return arr==target
|
nilq/baby-python
|
python
|
import os
import ctypes
import numpy as np
import copy
from envs import make_env
from envs.utils import goal_distance
from policy.replay_buffer import goal_concat
def c_double(value):
return ctypes.c_double(value)
def c_int(value):
return ctypes.c_int(value)
def gcc_compile(c_path, so_path=None):
assert c_path[-2:] == '.c'
if so_path is None:
so_path = c_path[:-2]+'.so'
else:
assert so_path[-3:] == '.so'
os.system('gcc -o '+so_path+' -shared -fPIC '+c_path+' -O2')
return so_path
def gcc_load_lib(lib_path):
if lib_path[-2:] == '.c':
        lib_path = gcc_compile(lib_path)
    else:
        assert lib_path[-3:] == '.so'
return ctypes.cdll.LoadLibrary(lib_path)
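# Minimal usage sketch (hypothetical C file): compile once, then call exported
# symbols through ctypes, wrapping scalars with c_double/c_int above, e.g.
#   lib = gcc_load_lib('utils/cost_flow.c')  # builds utils/cost_flow.so if needed
#   lib.add(0, 1, 1, c_double(0.5))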
class MatchSampler:
def __init__(self, args, achieved_trajectory_pool):
self.args = args
self.env = make_env(args)
self.env_test = make_env(args)
self.dim = np.prod(self.env.reset()['achieved_goal'].shape)
self.delta = self.env.distance_threshold
self.length = args.episodes
init_goal = self.env.reset()['achieved_goal'].copy()
self.pool = np.tile(init_goal[np.newaxis, :], [
self.length, 1])+np.random.normal(0, self.delta, size=(self.length, self.dim))
self.init_state = self.env.reset()['observation'].copy()
self.match_lib = gcc_load_lib('utils/cost_flow.c')
self.achieved_trajectory_pool = achieved_trajectory_pool
# estimating diameter
self.max_dis = 0
for i in range(1000):
obs = self.env.reset()
dis = goal_distance(obs['achieved_goal'], obs['desired_goal'])
if dis > self.max_dis:
self.max_dis = dis
def add_noise(self, pre_goal, noise_std=None):
goal = pre_goal.copy()
dim = 2 if self.args.env[:5] == 'Fetch' else self.dim
if noise_std is None:
noise_std = self.delta
goal[:dim] += np.random.normal(0, noise_std, size=dim)
return goal.copy()
def sample(self, idx):
if self.args.env[:5] == 'Fetch':
return self.add_noise(self.pool[idx])
else:
return self.pool[idx].copy()
def find(self, goal):
res = np.sqrt(np.sum(np.square(self.pool-goal), axis=1))
idx = np.argmin(res)
        if getattr(self.args, 'test_pool', False):  # optional debug flag; off unless args provide it
            self.args.logger.add_record('Distance/sampler', res[idx])
return self.pool[idx].copy()
def update(self, initial_goals, desired_goals):
if self.achieved_trajectory_pool.counter == 0:
self.pool = copy.deepcopy(desired_goals)
return
achieved_pool, achieved_pool_init_state = self.achieved_trajectory_pool.pad()
candidate_goals = []
candidate_edges = []
candidate_id = []
agent = self.args.agent
achieved_value = []
for i in range(len(achieved_pool)):
obs = [goal_concat(achieved_pool_init_state[i], achieved_pool[i][j])
for j in range(achieved_pool[i].shape[0])]
feed_dict = {
agent.state_t_input: obs
}
value = agent.sess.run(agent.q_pi, feed_dict)[:, 0]
value = np.clip(value, -1.0/(1.0-self.args.gamma), 0)
achieved_value.append(value.copy())
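        # Build a min-cost flow instance: source 0 -> one node per achieved
        # trajectory (capacity 1) -> one node per desired goal -> sink n. Each
        # achieved-to-desired edge costs goal distance minus a scaled value
        # estimate, so the resulting perfect matching assigns every desired
        # goal an exploration goal that is both nearby and reachable.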
n = 0
graph_id = {'achieved': [], 'desired': []}
for i in range(len(achieved_pool)):
n += 1
graph_id['achieved'].append(n)
for i in range(len(desired_goals)):
n += 1
graph_id['desired'].append(n)
n += 1
self.match_lib.clear(n)
for i in range(len(achieved_pool)):
self.match_lib.add(0, graph_id['achieved'][i], 1, 0)
for i in range(len(achieved_pool)):
for j in range(len(desired_goals)):
res = np.sqrt(np.sum(np.square(achieved_pool[i]-desired_goals[j]), axis=1)) - \
achieved_value[i]/(self.args.hgg_L /
self.max_dis/(1-self.args.gamma))
match_dis = np.min(
res)+goal_distance(achieved_pool[i][0], initial_goals[j])*self.args.hgg_c
match_idx = np.argmin(res)
edge = self.match_lib.add(
graph_id['achieved'][i], graph_id['desired'][j], 1, c_double(match_dis))
candidate_goals.append(achieved_pool[i][match_idx])
candidate_edges.append(edge)
candidate_id.append(j)
for i in range(len(desired_goals)):
self.match_lib.add(graph_id['desired'][i], n, 1, 0)
match_count = self.match_lib.cost_flow(0, n)
assert match_count == self.length
explore_goals = [0]*self.length
for i in range(len(candidate_goals)):
if self.match_lib.check_match(candidate_edges[i]) == 1:
explore_goals[candidate_id[i]] = candidate_goals[i].copy()
assert len(explore_goals) == self.length
self.pool = np.array(explore_goals)
|
nilq/baby-python
|
python
|
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import password_validation
from django.utils.translation import ugettext_lazy as _
from django import forms
from .models import Profile, User
class LoginForm(AuthenticationForm):
username = forms.CharField(label="Username", max_length=30,
widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'username'}))
    password = forms.CharField(label="Password", max_length=30,
                               widget=forms.PasswordInput(attrs={'class': 'form-control', 'name': 'password'}))
class UserRegistrationForm(forms.Form):
username = forms.CharField(
required=True,
max_length=32,
widget=forms.TextInput(attrs={'placeholder': 'Username'})
)
email = forms.EmailField(
required=True
)
password = forms.CharField(
required=True,
max_length=32,
widget=forms.PasswordInput,
)
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('full_name', 'birth_date', 'birth_place', 'address', 'phone_number', 'id_doc_number')
widgets = {
'full_name': forms.TextInput(
attrs={
'placeholder': _('full name'),
'class': 'form-input',
'type': 'text'
}
),
'birth_date': forms.DateInput(
attrs={
'placeholder': _('birth date: 1990-01-01'),
'class': 'form-input',
'type': 'date'
}
),
'birth_place': forms.TextInput(
attrs={
'placeholder': _('place of birth'),
'class': 'form-input'
}
),
'address': forms.TextInput(
attrs={
'placeholder': _('residency address'),
'class': 'form-input'
}
),
'phone_number': forms.TextInput(
attrs={
'placeholder': _('phone number'),
'class': 'form-input',
'type': 'tel'
}
),
'id_doc_number': forms.TextInput(
attrs={
'placeholder': _('identification document number'),
'class': 'form-input',
'type': 'number'
}
),
}
class SignupForm(forms.ModelForm):
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(
label=_("Password"),
strip=False,
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_("Password confirmation"),
widget=forms.PasswordInput,
strip=False,
help_text=_("Enter the same password as before, for verification."),
)
class Meta:
model = User
fields = ("email",)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._meta.model.USERNAME_FIELD in self.fields:
self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update({'autofocus': True})
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
return password2
def _post_clean(self):
super()._post_clean()
# Validate the password after self.instance is updated with form data
# by super().
password = self.cleaned_data.get('password2')
if password:
try:
password_validation.validate_password(password, self.instance)
except forms.ValidationError as error:
self.add_error('password2', error)
def save(self, commit=True):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
|
nilq/baby-python
|
python
|
import difflib
import os.path
import subprocess
import sys
from testconfig import config
from functools import partial
from six import print_, iteritems
tests_dir = partial(os.path.join, config['dirs']['tests'])
forth_dir = partial(os.path.join, config['dirs']['forth'])
logs_dir = partial(os.path.join, config['dirs']['logs'], 'forth')
def run_forth_vm(out = None, machine = None, options = None, diff_expected = None, coverage_name = None):
options = options or []
cmd = [
config['vm-runner']['ducky-vm'],
'--machine-config=%s' % tests_dir('forth', 'machine.conf'),
'--set-option=bootloader:file=%s' % forth_dir('ducky-forth'),
'--set-option=device-6:filepath=%s' % config['forth']['dummy-storage']
] + options + [
'--set-option=device-3:stream_out=%s' % out
]
env = os.environ.copy()
if config['options']['coverage'] == 'yes':
assert coverage_name is not None
cmd[0] = '%s %s' % (config['vm-runner']['coverage'], cmd[0])
env['COVERAGE_FILE'] = os.path.join(config['dirs']['coverage'], '.coverage.%s' % coverage_name)
if config['options']['profile'] == 'yes':
cmd.append('-p -P %s' % config['dirs']['profile'])
if os.environ.get('JIT', 'no') == 'yes':
cmd.append('--jit')
cmd[0] = '%s %s' % (config['vm-runner']['runner'], cmd[0])
cmd = ' '.join(cmd)
with open(config['log']['trace'], 'a') as f_trace:
f_trace.write('CMD: %s\n' % cmd)
f_trace.write('ENV:\n')
for k, v in iteritems(env):
f_trace.write(' %s=%s\n' % (k, v))
with open(machine, 'w') as f_out:
try:
subprocess.check_call(cmd, stdout = f_out, stderr = f_out, shell = True, env = env)
except subprocess.CalledProcessError as e:
assert False, 'FORTH VM failed with exit code %s' % e.returncode
with open(out, 'r') as f_out:
output = f_out.read()
if 'INCORRECT RESULT' in output or 'WRONG NUMBER OF RESULTS' in output:
print_(output, file = sys.stderr)
assert False, 'Test provided incorrect results'
if diff_expected is None:
return
expected = tests_dir(*diff_expected)
if not os.path.exists(expected):
return
with open(expected, 'r') as f_expected:
with open(out, 'r') as f_actual:
diff = '\n'.join(list(difflib.unified_diff(f_expected.readlines(), f_actual.readlines(), lineterm = '')))
if diff:
print_('\n' + diff, file = sys.stderr)
assert False, 'Actual output does not match the expected.'
|
nilq/baby-python
|
python
|
import os
import urllib.parse
basedir = os.path.abspath(os.path.dirname(__file__))
class BaseConfig:
"""Base configuration"""
APP_NAME = 'Sunway Innovators'
DEBUG = False
TESTING = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = os.environ.get('SECRET_KEY')
UPLOAD_FOLDER = 'upload/'
MAX_CONTENT_PATH = 26214400
MAIL_SERVER = 'smtp.sendgrid.net'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
EMAIL_SUBJECT_PREFIX = '[{}]'.format(APP_NAME)
EMAIL_SENDER = '{app_name} Team <{email}>'.format(
app_name=APP_NAME, email=MAIL_USERNAME)
    REDIS_URL = os.getenv('REDISTOGO_URL') or 'redis://localhost:6379'
urllib.parse.uses_netloc.append('redis')
url = urllib.parse.urlparse(REDIS_URL)
RQ_DEFAULT_HOST = url.hostname
RQ_DEFAULT_PORT = url.port
RQ_DEFAULT_PASSWORD = url.password
RQ_DEFAULT_DB = 0
class DevelopmentConfig(BaseConfig):
"""Development configuration"""
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
SECRET_KEY = 'I4MS3CR3T'
class TestingConfig(BaseConfig):
"""Testing configuration"""
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_TEST_URL')
class StagingConfig(BaseConfig):
"""Staging configuration"""
DEBUG = False
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
class ProductionConfig(BaseConfig):
"""Production configuration"""
DEBUG = False
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
|
nilq/baby-python
|
python
|
"""Tests for soft actor critic."""
from absl.testing import absltest
import acme
from acme import specs
from acme.testing import fakes
from acme.utils import loggers
from magi.agents import sac
class SACTest(absltest.TestCase):
def test_sac(self):
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(
action_dim=2, observation_dim=3, episode_length=10, bounded=True
)
spec = specs.make_environment_spec(environment)
# Make network purely functional
agent_networks = sac.make_networks(
spec,
policy_layer_sizes=(32, 32),
critic_layer_sizes=(32, 32),
)
# Construct the agent.
agent = sac.SACAgent(
environment_spec=spec,
networks=agent_networks,
config=sac.SACConfig(
target_entropy=sac.target_entropy_from_env_spec(spec),
min_replay_size=1,
batch_size=2,
),
seed=0,
)
# Try running the environment loop. We have no assertions here because all
# we care about is that the agent runs without raising any errors.
loop = acme.EnvironmentLoop(
environment,
agent,
logger=loggers.make_default_logger(label="environment", save_data=False),
)
loop.run(num_episodes=2)
if __name__ == "__main__":
absltest.main()
|
nilq/baby-python
|
python
|
from abc import ABCMeta, abstractmethod
import numpy as np
from keras.layers import Input, Lambda
from keras.models import Model
from model.Autoencoder import Autoencoder
from model.loss.kullbackLeiberLoss import kullbackLeiberLossConstructor
from model.loss.variationalAutoencoderLoss import variationalAutoencoderLossConstructor
from model.supplementary.sampling import samplingConstructor
class VariationalAutoencoder(Autoencoder, metaclass=ABCMeta):
def __init__(self, reconstructionLossConstructor, klLossWeight, inputRepresentationDimensions, latentRepresentationDimension):
self.__reconstructionLossConstructor = reconstructionLossConstructor
self.__klLossWeight = klLossWeight
self.__inputRepresentationDimensions = inputRepresentationDimensions
self.__latentRepresentationDimension = latentRepresentationDimension
def buildModels(self):
encoderLayers = self.encoderLayersConstructor()
decoderLayers = self.decoderLayersConstructor()
self.__buildAutoencoder(encoderLayers, decoderLayers)
self.__buildEncoder(encoderLayers)
self.__buildDecoder(decoderLayers)
def __buildAutoencoder(self, encoderLayers, decoderLayers):
# Input to the encoder and autoencoder models:
inputRepresentation = Input(shape=self.__inputRepresentationDimensions)
latentRepresentationMean, latentRepresentationLogVariance = encoderLayers(inputRepresentation)
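        # Reparameterization trick: the sampling layer is expected to draw
        # z = mean + exp(logVariance / 2) * eps with eps ~ N(0, I), which keeps
        # the stochastic node differentiable w.r.t. the encoder outputs.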
latentRepresentation = Lambda(
samplingConstructor(self.__latentRepresentationDimension),
output_shape=(self.__latentRepresentationDimension,)
)([latentRepresentationMean, latentRepresentationLogVariance])
decodedInputRepresentation = decoderLayers(latentRepresentation)
self.__autoencoder = Model(inputRepresentation, decodedInputRepresentation)
self.__autoencoder.compile(
optimizer='adam',
loss=variationalAutoencoderLossConstructor(
self.__reconstructionLossConstructor,
self.__klLossWeight,
self.__inputRepresentationDimensions,
latentRepresentationMean,
latentRepresentationLogVariance),
metrics=[
self.__reconstructionLossConstructor(self.__inputRepresentationDimensions),
kullbackLeiberLossConstructor(latentRepresentationMean, latentRepresentationLogVariance)
]
)
def __buildEncoder(self, encoderLayers):
inputRepresentation = Input(shape=self.__inputRepresentationDimensions)
latentRepresentationMean, _ = encoderLayers(inputRepresentation)
self._encoder = Model(inputRepresentation, latentRepresentationMean)
def __buildDecoder(self, decoderLayers):
customLatentRepresentation = Input(shape=(self.__latentRepresentationDimension,))
customDecodedInputRepresentation = decoderLayers(customLatentRepresentation)
self.__decoder = Model(customLatentRepresentation, customDecodedInputRepresentation)
    @abstractmethod
def encoderLayersConstructor(self):
raise NotImplementedError
    @abstractmethod
def decoderLayersConstructor(self):
raise NotImplementedError
def evaluateLayersList(self, layersList, input):
intermediateResult = input
for layer in layersList:
intermediateResult = layer(intermediateResult)
return intermediateResult
def collapseLayers(self, layers):
return lambda input: self.evaluateLayersList(layers, input)
def encoder(self) -> Model:
return self._encoder
def decoder(self) -> Model:
return self.__decoder
def autoencoder(self) -> Model:
return self.__autoencoder
def train(
self,
trainingData: np.ndarray,
validationData: np.ndarray,
epochs,
batchSize):
return self.__autoencoder.fit(
trainingData,
trainingData,
shuffle=True,
epochs=epochs,
batch_size=batchSize,
validation_data=(validationData, validationData))
def summary(self):
self.__autoencoder.summary()
def saveWeights(self, location):
self.__autoencoder.save_weights(location)
def loadWeights(self, location):
self.__autoencoder.load_weights(location)
def evaluate(self, data, batchSize=100):
return self.__autoencoder.evaluate(data, data, batch_size=batchSize)
|
nilq/baby-python
|
python
|
import os
from utils import *
DATADIVR_PATH = os.path.realpath(os.path.join(os.path.dirname(os.getcwd()), "DataDiVR"))
LAYOUTS_DIR = os.path.join(DATADIVR_PATH, "viveNet/Content/data/layouts")
LINKS_DIR = os.path.join(DATADIVR_PATH, "viveNet/Content/data/links")
LABELS_DIR = os.path.join(DATADIVR_PATH, "viveNet/Content/data/labels")
ERRORS_TO_SHOW=10
layouts = [f for f in os.listdir(LAYOUTS_DIR) if os.path.isfile(os.path.join(LAYOUTS_DIR, f)) and os.path.splitext(f)[1] == ".csv"]
layout_line_counts = {}
for layout in layouts:
with open(os.path.join(LAYOUTS_DIR, layout)) as f:
for i, l in enumerate(f):
pass
layout_line_counts[layout] = i+1
links_lists = [f for f in os.listdir(LINKS_DIR) if os.path.isfile(os.path.join(LINKS_DIR, f))]
for links_list in links_lists:
record_errors = True
bad_lines = []
num_col_errors = 0
num_idx_errors = 0
matching_layouts = [layout for layout in layout_line_counts if links_list.startswith(os.path.splitext(layout)[0])]
if not matching_layouts:
print("ERROR: Links list without matching layout detected: %s." % links_list)
continue
shortest_matching_layout_length = min([layout_line_counts[layout] for layout in matching_layouts])
with open(os.path.join(LINKS_DIR, links_list)) as f:
for i, line in enumerate(f):
line = line.split(",")
# Validate number of columns
if len(line) != 2:
num_col_errors += 1
if record_errors:
bad_lines.append(["Illegal number of columns", 6, len(line), i, ",".join(line)])
if len(bad_lines) == ERRORS_TO_SHOW:
record_errors = False
# Validate references to nodes
for x in range(2):
if x >= len(line):
continue
if not validate_index(line[x], shortest_matching_layout_length):
num_idx_errors += 1
if record_errors:
bad_lines.append(["Illegal node reference (out of range)", "int 0 <= i < %s" % line_count, line[x], i, ",".join(line)])
if len(bad_lines) == ERRORS_TO_SHOW:
record_errors = False
if num_col_errors or num_idx_errors:
print("FATAL ERROR: errors in file %s\n" % links_list)
print("Note: Each row should contain exactly two comma-separated fields:\n"
" [N1, N2] \n"
" N1 and N2 are the 0-indexed IDs (line numbers) of the nodes in the corresponding layout.\n")
asciitable(["Error type", "Count"], [list(x) for x in zip (["Invalid number of columns", "Invalid index values", "Invalid RGB values"],
[str(num_col_errors), str(num_idx_errors), str(num_rgb_errors)])])
print("\nFirst %d errors:" % ERRORS_TO_SHOW)
asciitable(["Issue", "Expected", "Got", "Line #", "Line"], bad_lines)
else:
print("All tests passed for %s!"% links_list)
|
nilq/baby-python
|
python
|
from app import app
from flask import render_template, request
from forms import GetLucky
from random import randint
@app.route('/')
def lucky_static():
lucky_num = randint(1, 10)
return render_template('simple.html', lucky_num=lucky_num)
@app.route('/<max>/')
def lucky_max(max):
lucky_num = randint(1, int(max))
return render_template('simple.html', lucky_num=lucky_num)
def get_game_nums(num=5, max=10):
game_nums = []
while len(game_nums) < num:
        n = randint(1, max)
if not n in game_nums:
game_nums.append(n)
return game_nums
@app.route('/game/')
def game():
game_nums = get_game_nums()
return render_template('game.html', game_nums = game_nums)
@app.route('/nums/', methods=['GET', 'POST'])
def get_nums():
form = GetLucky()
if form.validate_on_submit():
if request.method == 'POST':
numbers = form.numbers.data
game_nums = get_game_nums()
nums = numbers.split()
wins = 0
for num in nums:
if int(num) in game_nums:
wins += 1
return render_template('game.html', game_nums=game_nums,
player_nums=numbers, wins=wins)
else:
return render_template('get_lucky.html', form=form)
if request.method == 'GET':
return render_template('get_lucky.html', form=form)
|
nilq/baby-python
|
python
|
import unittest
from stock_prices import fetchStockData
import io
import sys
class TestFileName(unittest.TestCase):
def test_function1(self):
symbol = 'AAPL'
        self.assertTrue(fetchStockData(symbol))
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
import re
from rdp.symbols import Regexp, flatten
letters = Regexp(r'[a-zA-Z]+')
digits = Regexp(r'[0-9]+')
hexdigits = Regexp(r'[0-9a-fA-F]+')
octdigits = Regexp(r'[0-7]+')
whitespace = Regexp(r'\s+')
word = Regexp(r'[a-zA-Z0-9_]+')
hyphen_word = Regexp(r'[a-zA-Z0-9_-]+')
identifier = Regexp(r'[a-zA-Z_][a-zA-Z0-9_]*')
hyphen_identifier = Regexp(r'[a-zA-Z_-][a-zA-Z0-9_-]*')
horizontal_whitespace = Regexp(r'[ \t]+')
whitespace = Regexp(r'[ \t\n\r]+')
py_decimalinteger = Regexp(r'[1-9]\d*') | '0'
py_hexinteger = Regexp(r'0[xX][0-9a-fA-F]+')
py_octinteger = Regexp(r'0[oO][0-7]+') | Regexp(r'0[0-7]+')
py_bininteger = Regexp(r'0[bB][01]+')
float_literal = Regexp(r'(?:[1-9]\d*|0)?\.\d*(?:[eE][+-]?\d+)?')
py_integer = py_decimalinteger | py_hexinteger | py_octinteger | py_bininteger
def quoted_string(quote_char, escape_char='\\'):
assert len(quote_char) == 1
return Regexp(r'{q}(?:{e}{q}|[^{q}])*{q}'.format(
q=quote_char,
e=re.escape(escape_char),
))
double_quoted_string = quoted_string('"')
single_quoted_string = quoted_string("'")
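# A quick self-check of the quoted_string() pattern above, using only the
# stdlib re module. The Regexp wrapper's API is not shown in this file, so
# this sketch rebuilds the same raw pattern string by hand:
if __name__ == '__main__':
    _pattern = r'"(?:\\"|[^"])*"'
    assert re.fullmatch(_pattern, '"hello"')
    assert re.fullmatch(_pattern, r'"say \"hi\""')  # escaped quotes stay inside
    assert re.fullmatch(_pattern, '"unterminated') is None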
|
nilq/baby-python
|
python
|
__author__ = 'admin'
import pretender_defaults
import pretend_helpers
class Request:
def __init__(self):
self.url = pretender_defaults.url
self.headers = {}
self.body = pretender_defaults.request_body
self.method = pretender_defaults.method
def set_request_entities(self,request_json):
self.url = pretend_helpers.get_url_from_json(request_json) # get the URL from the json
        self.method = request_json['method'] if 'method' in request_json else pretender_defaults.method # get the request method from the json
self.body = pretend_helpers.get_body_from_json(request_json) # get the request body from the json
self.headers = pretend_helpers.get_headers_from_json(request_json) # get the request headers from the json
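# Illustrative shape of the request_json this class expects. The exact keys
# consumed by pretend_helpers are not shown here, so the url/body/headers
# fields below are assumptions; only 'method' is read directly above:
# request_json = {
#     'url': 'http://localhost:8000/ping',
#     'method': 'GET',
#     'body': '',
#     'headers': {'Accept': 'application/json'},
# }
# r = Request()
# r.set_request_entities(request_json)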
|
nilq/baby-python
|
python
|
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
##
# This is an absolute override file, indicating that a higher priority version
# of the file will completely replace a lower priority version of the file.
##
import TimeRange
from com.raytheon.uf.common.dataplugin.gfe.db.objects import GridParmInfo
class GridInfo(object):
##
# Constructor. gridTime is required, supply gridParmInfo OR the rest of
# the parameters (not both).
#
def __init__(self, parmID=None, gridLoc=None, maxLimit=None, minLimit=None,
units=None, gridTime=None, type=None, timeIndependentParm=None,
timeConstraints=None, precision=None, rateParm=None,
descriptiveName=None, gridParmInfo=None):
        if gridParmInfo is None:
gridParmInfo = GridParmInfo(parmID, gridLoc, type, units,
descriptiveName, minLimit, maxLimit,
precision, timeIndependentParm,
timeConstraints, rateParm)
elif parmID is not None or \
gridLoc is not None or \
maxLimit is not None or \
minLimit is not None or \
units is not None or \
type is not None or \
timeIndependentParm is not None or \
timeConstraints is not None or \
precision is not None or \
rateParm is not None or \
descriptiveName is not None:
raise IllegalArgumentException("Only gridTime can be specified with gridParmInfo")
self.gridParmInfo = gridParmInfo
if isinstance(gridTime, TimeRange.TimeRange):
            self._gridTime = gridTime
else :
self._gridTime = TimeRange.TimeRange(gridTime)
##
# Get the parm ID of the Parm this grid belongs to.
# @return: the Parm ID
# @rtype: com.raytheon.uf.common.dataplugin.gfe.db.objects.ParmID
def getParmID(self):
return self.gridParmInfo.getParmID()
##
# Return the grid location.
# @return: the grid location.
# @rtype com.raytheon.uf.common.dataplugin.gfe.db.objects.GridLocation
def gridLocation(self):
return self.gridParmInfo.getGridLoc()
##
# Return the maximum value allowed for this grid.
# @return: Maximum value
# @rtype: float
def maxLimit(self):
return self.gridParmInfo.getMaxValue()
##
# Return the minimum value allowed for this grid.
# @return Minimum value
# @rtype: float
def minLimit(self):
return self.gridParmInfo.getMinValue()
##
# Return the time range of this grid.
# @return: The valid time range of the grid.
# @rtype: TimeRange.TimeRange
def gridTime(self):
return self._gridTime
##
# Return the grid type.
# @return: the grid type
# @rtype: com.raytheon.uf.common.dataplugin.gfe.db.objects.GridParmInfo.GridType
def type(self):
return self.gridParmInfo.getGridType()
##
# The parm units, as a String.
# @return: The units
# @rtype: String
def units(self):
return self.gridParmInfo.getUnitString()
##
# @return: Whether this is a time independent parm.
# @rtype: boolean
def timeIndependentParm(self):
return self.gridParmInfo.getTimeIndependentParm()
##
# @return: The time constraints of this grid
# @rtype: com.raytheon.uf.common.dataplugin.gfe.db.objects.TimeConstraints
def tc(self):
return self.gridParmInfo.getTimeConstraints()
##
# @rtype: int
def precision(self):
return self.gridParmInfo.getPrecision()
##
# Return whether this grid's parm is a rate parm.
# @rtype: boolean
def rateParm(self):
return self.gridParmInfo.isRateParm()
|
nilq/baby-python
|
python
|
# Kubos SDK
# Copyright (C) 2016 Kubos Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
module_file_name = 'module.json'
k_lib_path = 'KUBOS_LIB_PATH'
#keys for link json data
module_key = 'modules'
target_key = 'targets'
target_mount_dir = os.path.join('/', 'usr', 'lib', 'yotta_targets')
def get_project_name():
module_file_path = os.path.join(os.getcwd(), module_file_name)
if os.path.isfile(module_file_path):
with open(module_file_path, 'r') as module_file:
data = json.load(module_file)
name = data['name']
return name
else:
return None
def get_global_link_file():
home_dir = os.path.expanduser('~')
kubos_file_path = os.path.join(home_dir, '.kubos-link-global.json')
return kubos_file_path
def get_local_link_file():
this_dir = os.getcwd()
path = os.path.join(this_dir, '.kubos-link.json')
return path
def add_env_var(var_name, value):
    if var_name not in os.environ:
os.environ[var_name] = value
else:
os.environ[var_name] += ':%s' % value
def add_kubos_lib_path(value):
add_env_var(k_lib_path, value)
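# A minimal sanity check of the PATH-style semantics above: the first call
# creates the variable, later calls append to it with a ':' separator.
if __name__ == '__main__':
    add_env_var('KUBOS_DEMO_PATH', '/first')
    add_env_var('KUBOS_DEMO_PATH', '/second')
    assert os.environ['KUBOS_DEMO_PATH'] == '/first:/second'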
|
nilq/baby-python
|
python
|
import pygame
import attore
# Fish-specific class that inherits from the Attore (actor) base class
class Pesce(attore.Attore):
pass
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Various utilities for interacting with the API."""
import os
import re
import pyodbc
from django.conf import settings
from djimix.constants import TERM_LIST
from djimix.core.database import get_connection
from djimix.core.database import xsql
from djpsilobus.core.data import DEPARTMENTS
from djpsilobus.core.data import ITEM_METADATA
from djpsilobus.core.dspace import Manager
from djpsilobus.core.sql import SECTIONS
def sections(code=None, year=None, sess=None, fid=None):
"""Fetch all course sections.
Args:
code: a department code
year: YYYY
sess: a tuple of sessions
fid: a faculty ID
Returns:
all courses that meet the above criteria.
"""
where = ''
if code:
where += ' AND crs_rec.dept = "{0}" '.format(code)
if year:
where += ' AND sec_rec.yr = {0} '.format(year)
if sess:
where += ' AND sec_rec.sess in {0} '.format(sess)
if fid:
where += ' AND sec_rec.fac_id = {0} '.format(fid)
# close connection when exiting with block
sql = SECTIONS(where=where)
with get_connection() as connection:
rows = xsql(sql, connection)
try:
return rows.fetchall()
except AttributeError:
#return None
return sql
def division_departments(code):
"""Fetch all departments for a division given the four letter code."""
sql = """
SELECT * FROM dept_table
WHERE div = '{0}' ORDER BY txt
""".format(code)
with get_connection() as connection:
return xsql(sql, connection).fetchall()
def find_file(phile):
"""Using the DSpace REST API, execute a search for a file name
contained in the dc.title.alternative metadata field.
Args:
phile: a file name.
Returns:
a json object.
Raises:
none.
"""
req_dict = {
'key': 'dc.title.alternative',
'value': '{0}'.format(phile),
'language': 'en_US',
}
manager = Manager()
return manager.request(
'items/find-by-metadata-field', 'post', req_dict,
)
def get_items(collection_id):
"""Fetch items form the API.
Args:
collection_id: a collection UUID
Returns:
all items in that collection
Raises:
none.
"""
manager = Manager()
return manager.request(
'collections/{0}/items'.format(collection_id), 'get',
)
def create_item(item):
"""Create an item through the API.
Args:
item: a dictionary with the following keys:
course_number, title, year, term, fullname
Returns:
new_item: the newly created item
Raises:
none.
"""
item_data = ITEM_METADATA
prefix = 'UG'
if item['term'][0] == 'G':
prefix = 'GR'
cat = '{0}{1}'.format(prefix, item['year'][-2:])
sql = 'SELECT * FROM crsabstr_rec WHERE crs_no="{0}" AND cat="{1}"'.format(
item['course_number'], cat,
)
with get_connection() as connection:
row = xsql(sql, connection)
if row:
row = row.fetchone()
if row and row.abstr:
abstr = row.abstr
else:
abstr = ''
dept = item['course_number'][:4].strip()
collection_id = DEPARTMENTS[dept]
# author
item_data['metadata'][0]['value'] = item['fullname']
# description
item_data['metadata'][1]['value'] = abstr
# title
item_data['metadata'][2]['value'] = item['title']
# title alternative
item_data['metadata'][3]['value'] = item['title_alt']
# subject year
item_data['metadata'][4]['value'] = item['year']
# subject term
item_data['metadata'][5]['value'] = TERM_LIST[item['term']]
uri = 'collections/{0}/items'.format(collection_id)
manager = Manager()
return manager.request(uri, 'post', item_data)
def syllabus_name(course):
"""Creates the syllabus name that DSpace expects."""
lastname = re.sub('[^0-9a-zA-Z]+', '_', course.lastname)
firstname = re.sub('[^0-9a-zA-Z]+', '_', course.firstname)
return '{0}_{1}_{2}_{3}_{4}_{5}_syllabus'.format(
course.yr,
course.sess,
course.crs_no.replace(' ', '_'),
course.sec_no,
lastname,
firstname,
)
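# Hypothetical example of the naming scheme above (course rows normally come
# from the sections() query; SimpleNamespace and the 'RA' session code only
# stand in for real data here):
# from types import SimpleNamespace
# course = SimpleNamespace(yr=2021, sess='RA', crs_no='ENGL 101', sec_no='01',
#                          lastname="O'Brien", firstname='Mary')
# syllabus_name(course)  # -> '2021_RA_ENGL_101_01_O_Brien_Mary_syllabus'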
def sheet(ws, division, department, courses):
"""Create a spread sheet."""
# set sheet title
ws.title = department
# create a list for each row and insert into workbook
for course in courses:
section = []
for course_item in course:
section.append(course_item)
# check for syllabus
phile = syllabus_name(course)
path = '{0}{1}/{2}/{3}/{4}/{5}.pdf'.format(
settings.UPLOADS_DIR,
course.yr,
course.sess,
division,
department,
phile,
)
if os.path.isfile(path):
syllabus = 'Yes'
else:
syllabus = 'No'
section.append(syllabus)
ws.append(section)
return ws
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import sys
from pyxl.codec.transform import pyxl_invert_string, pyxl_transform_string
if __name__ == '__main__':
invert = invertible = False
if sys.argv[1] == '-i':
invertible = True
fname = sys.argv[2]
elif sys.argv[1] == '-r':
invert = True
fname = sys.argv[2]
else:
fname = sys.argv[1]
with open(fname, 'r') as f:
contents = f.read()
if invert:
print(pyxl_invert_string(contents), end='')
else:
print(pyxl_transform_string(contents, invertible), end='')
|
nilq/baby-python
|
python
|
"""Support for the PrezziBenzina.it service."""
import datetime as dt
from datetime import timedelta
import logging
from prezzibenzina import PrezziBenzinaPy
import voluptuous as vol
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_TIME, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_FUEL = "fuel"
ATTR_SERVICE = "service"
ATTRIBUTION = "Data provided by PrezziBenzina.it"
CONF_STATION = "station"
CONF_TYPES = "fuel_types"
ICON = "mdi:fuel"
FUEL_TYPES = [
"Benzina",
"Benzina speciale",
"Diesel",
"Diesel speciale",
"GPL",
"Metano",
]
SCAN_INTERVAL = timedelta(minutes=120)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_STATION): cv.string,
vol.Optional(CONF_NAME, None): cv.string,
vol.Optional(CONF_TYPES, None): vol.All(cv.ensure_list, [vol.In(FUEL_TYPES)]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the PrezziBenzina sensor platform."""
station = config[CONF_STATION]
name = config.get(CONF_NAME)
types = config.get(CONF_TYPES)
client = PrezziBenzinaPy()
dev = []
    entries = client.get_by_id(station)
if name is None:
name = client.get_station_name(station)
    for index, info in enumerate(entries):
if types is not None and info["fuel"] not in types:
continue
dev.append(
PrezziBenzinaSensor(
index, client, station, name, info["fuel"], info["service"]
)
)
add_entities(dev, True)
class PrezziBenzinaSensor(Entity):
"""Implementation of a PrezziBenzina sensor."""
def __init__(self, index, client, station, name, ft, srv):
"""Initialize the PrezziBenzina sensor."""
self._client = client
self._index = index
self._data = None
self._station = station
self._name = f"{name} {ft} {srv}"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
@property
def state(self):
"""Return the state of the device."""
return self._data["price"].replace(" €", "")
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._data["price"].split(" ")[1]
@property
def device_state_attributes(self):
"""Return the device state attributes of the last update."""
timestamp = dt.datetime.strptime(
self._data["date"], "%d/%m/%Y %H:%M"
).isoformat()
attrs = {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_FUEL: self._data["fuel"],
ATTR_SERVICE: self._data["service"],
ATTR_TIME: timestamp,
}
return attrs
def update(self):
"""Get the latest data and updates the states."""
self._data = self._client.get_by_id(self._station)[self._index]
|
nilq/baby-python
|
python
|
import numpy as np
import skfuzzy as fuzz
class cluster():
def __init__(self,x,y,U,n_clusters):
data = np.reshape(U,(1,-1))
cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(data,n_clusters,2,error=0.0001, maxiter=10000, init=None)
self.labels = np.reshape(np.argmax(u,axis=0),U.shape)
self.labels = self.relabel(self.labels,cntr,n_clusters)
self.ys = self.get_ys(self.labels,y,n_clusters)
    def relabel(self, label, center, n_clusters):
        # reorder cluster labels so that they increase with the cluster center value
        tmp = np.linspace(0, n_clusters - 1, n_clusters, dtype=int)
center,tmp = zip(*sorted(zip(center,tmp)))
xx,yy = np.shape(label)
mask = np.zeros((xx,yy,n_clusters))
for ii in range(n_clusters):
mask[:,:,ii] = label == tmp[ii]
for ii in range(n_clusters):
label[np.nonzero(mask[:,:,ii])] = ii+1
return label
    def get_ys(self, label, y, n_clusters):
        # for each row, record the y position of the boundary between
        # consecutive cluster labels n+1 and n+2
nx,ny = label.shape
ys = np.zeros((nx,n_clusters-1))
for n in range(n_clusters-1):
for ii in range(nx):
ytmp = np.array([])
for jj in range(ny-1):
if (label[ii,jj] == n+2 and label[ii,jj+1] == n+1) or (label[ii,jj] == n+1 and label[ii,jj+1] == n+2):
ytmp = np.append(ytmp,0.5*(y[jj]+y[jj+1]))
if len(ytmp) != 0:
ys[ii,n] = np.max(ytmp)
else:
ys[ii,n] = 0
return ys
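# A small synthetic run of the class above (a sketch; assumed shapes: U is a
# 2-D field sampled on an x-y grid, y holds the grid's y coordinates, and the
# x argument is unused by __init__):
# import numpy as np
# y = np.linspace(0.0, 1.0, 30)
# U = np.cumsum(np.random.rand(20, 30), axis=1)  # field increasing along y
# c = cluster(None, y, U, 3)
# print(c.labels.shape, c.ys.shape)              # -> (20, 30) (20, 2)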
|
nilq/baby-python
|
python
|
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QDialog
from src.ui_elements.bonusingredient import Ui_addingredient
from src.config_manager import shared
from src.logger_handler import LoggerHandler
from src.display_controller import DP_CONTROLLER
from src.database_commander import DB_COMMANDER
from src.machine.controller import MACHINE
from src.tabs.bottles import set_fill_level_bars
from src.dialog_handler import UI_LANGUAGE
LOG_HANDLER = LoggerHandler("additional_ingredient", "production_logs")
class GetIngredientWindow(QDialog, Ui_addingredient):
""" Creates a Dialog to chose an additional ingredient and the amount
to spend this ingredient.
"""
def __init__(self, parent=None):
""" Init. Connects all the buttons and get values for the Combobox. """
super().__init__()
self.setupUi(self)
self.setWindowFlags(Qt.Window | Qt.CustomizeWindowHint | Qt.WindowStaysOnTopHint)
DP_CONTROLLER.inject_stylesheet(self)
# Set window properties
self.setWindowIcon(QIcon(parent.icon_path))
self.mainscreen = parent
# Connect all the buttons
self.PBplus.clicked.connect(lambda: DP_CONTROLLER.plusminus(self.LAmount, "+", 20, 100, 10))
self.PBminus.clicked.connect(lambda: DP_CONTROLLER.plusminus(self.LAmount, "-", 20, 100, 10))
self.PBAusgeben.clicked.connect(self.ausgeben_clicked)
self.PBAbbrechen.clicked.connect(self.abbrechen_clicked)
all_bottles = DB_COMMANDER.get_ingredients_at_bottles()
bottles = [x for x in all_bottles if x != ""]
DP_CONTROLLER.fill_single_combobox(self.CBingredient, bottles, first_empty=False)
UI_LANGUAGE.adjust_bonusingredient_screen(self)
self.showFullScreen()
DP_CONTROLLER.set_display_settings(self)
def abbrechen_clicked(self):
""" Closes the Window without a change. """
self.close()
def ausgeben_clicked(self):
""" Calls the Progressbarwindow and spends the given amount of the ingredient. """
ingredient_name, volume = DP_CONTROLLER.get_ingredient_window_data(self)
bottle, level = DB_COMMANDER.get_ingredient_bottle_and_level_by_name(ingredient_name)
self.close()
if volume > level:
DP_CONTROLLER.say_not_enough_ingredient_volume(ingredient_name, level, volume)
self.mainscreen.tabWidget.setCurrentIndex(3)
return
print(f"Spending {volume} ml {self.CBingredient.currentText()}")
made_volume, _, _ = MACHINE.make_cocktail(self.mainscreen, [bottle], [volume], ingredient_name, False)
DB_COMMANDER.increment_ingredient_consumption(ingredient_name, made_volume[0])
set_fill_level_bars(self.mainscreen)
volume_string = f"{volume} ml"
LOG_HANDLER.log_event("INFO", f"{volume_string:6} | {ingredient_name}")
self.mainscreen.prow_close()
shared.cocktail_started = False
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# coding: UTF-8
#---------------------------------------------------------------
# author:"Haxhimitsu"
# date :"2021/01/06"
# cite :
#Usage
# python3 src/tf_sample_ver2.0.py --dataset_path "{your input directory}" --log_dir "{your output directory}"
#---------------------------------------------------------------
#import keras,tensorflow module
import keras
from keras.utils import np_utils
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.preprocessing.image import array_to_img, img_to_array, load_img
import keras.callbacks
from keras.models import Sequential, model_from_json
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from tensorflow.keras.callbacks import EarlyStopping
import numpy as np
import cv2
import os
import csv
import copy
import random
import argparse
#my module
import sys
from utils.myutils import myutil
#check my module
myutil=myutil()
myutil.sayStr("Hello")
#################setting GPU useage#####################
config = tf.ConfigProto(
    gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.8,  # use at most 80% of GPU memory
                              allow_growth=True  # True -> allocate GPU memory on demand; False -> reserve it all up front
))
sess = tf.Session(config=config)
#####################################################
#argument
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_path",required=True,help="path to root dataset directory")
parser.add_argument("--train_path",help="path to train_data")
parser.add_argument("--val_path", help="path to val_data")
parser.add_argument("--test_path", help="pat to test_path")
parser.add_argument("--max_epochs", type =int ,default=100,help="set max epoch(int)")
parser.add_argument("--batch_size", type =int ,default=32,help="set batch size 2,4,6,8,..")
parser.add_argument("--save_weight_name", type=str,default="test",help="set_network_weight_name")
parser.add_argument("--save_json_name", type=str,default="test",help="set_json_name")
parser.add_argument("--log_dir", required=True, help="set_to_log_directory")
a = parser.parse_args()
log_dir=a.log_dir
myutil.create_directory(log_dir)  # create the log directory via myutil
print("log_dir=",log_dir)
weight_filename=a.save_weight_name+".hdf5"  # append the weight-file extension
json_filename=a.save_json_name+".json"
max_epochs=a.max_epochs
if a.train_path is None:  # if train_path is not given, fall back to dataset_path/trains/
train_path=a.dataset_path+"trains/"
#print("train_path",train_path)
else:
train_path=a.train_path
#print("train_path",train_path)
if a.val_path is None:
val_path=a.dataset_path+"valids/"
else:
val_path=a.val_path
if a.test_path is None:
test_path=a.dataset_path+"tests/"
else:
test_path=a.test_path
#Count the directories under train_path;
#this count becomes the number of classes.
print(len(os.listdir(train_path)))
#Build the network with myutil.create_network and assign it to model.
model=myutil.create_network(category_num=len(os.listdir(train_path)))
try:
    model.load_weights(os.path.join(log_dir,weight_filename))  # if earlier training results exist, load the weights
print("load model")
#model compile
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
    train_img,train_label,val_img,val_label=myutil.create_dataset(train_path,val_path)  # build the dataset with myutil.create_dataset
    score = model.evaluate(val_img, val_label, verbose=0)  # evaluate on the validation data
print('Test loss :', score[0])
print('Test accuracy :', score[1])
print("pass check_acc")
    myutil.check_acc(model,test_path,log_dir)  # evaluate each class under test/ with myutil.check_acc
    #result=myutil.acc2(model,test_path,log_dir)  # when using myutil.acc2, test_path must point to a single class directory -> test/class1/
print("pass check_acc")
except OSError:
print(".h5 file not found")
print("start loading the data set")
train_img,train_label,val_img,val_label=myutil.create_dataset(train_path,val_path)
    ###################EarlyStopping#######################
    """
    The point at which the validation error starts to rise is when the model
    begins to overfit the training data, so the per-epoch validation loss is
    monitored and training stops once it fails to improve on its minimum for
    a fixed number of consecutive epochs.
    monitor = 'the quantity to watch'
    patience = 'how many consecutive epochs without improvement before stopping early'
    verbose = 'whether to log when early stopping triggers'
    """
es = EarlyStopping(monitor='val_loss',
patience=20,
verbose=1)
    # compile the model
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
#####################################################
    history = model.fit(train_img, train_label, batch_size=a.batch_size, epochs=max_epochs,validation_data = (val_img, val_label), verbose = 1,callbacks=[es])  # start training; the parameters do what their names suggest
    model.save_weights(os.path.join(log_dir,weight_filename))  # save the learned weights under log_dir
json_dir=log_dir+"/"+json_filename#set json save path
open(json_dir,"w").write(model.to_json())#save model as json
score = model.evaluate(val_img, val_label, verbose=0)
print('Test loss :', score[0])
print('Test accuracy :', score[1])
myutil.check_acc(model,test_path,log_dir)
del train_img,train_label,val_img,val_label
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
"""
Defines a class Review.
"""
from models.review import Review
import unittest
import models
import os
class TestReview(unittest.TestCase):
"""Represent a Review."""
def setUp(self):
"""SetUp method"""
self.review = Review()
    def tearDown(self):
"""TearDown method."""
del self.review
def test_docstring(self):
"""Test docstring for the module and the class"""
self.assertIsNotNone(
models.review.__doc__,
"No docstring in the module"
)
self.assertIsNotNone(Review.__doc__, "No docstring in the class")
def test_permissions_file(self):
"""Test File review.py permissions"""
test_file = os.access("models/review.py", os.R_OK)
self.assertTrue(test_file, "Read permissions")
test_file = os.access("models/review.py", os.W_OK)
self.assertTrue(test_file, "Write Permissions")
test_file = os.access("models/review.py", os.X_OK)
self.assertTrue(test_file, "Execute permissions")
def test_type_object(self):
"""Test type object of Review"""
self.assertEqual(
str(type(self.review)),
"<class 'models.review.Review'>")
self.assertIsInstance(self.review, Review)
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier
follow = []
follower = []
group = []
dogList = []
catList = []
count = 0
followInfor = {}
for i in range(10):
path = r'raw data\catfollow\CatfollowInfo_%s.pickle'%str(i)
with open(path, 'rb') as file:
data = pickle.load(file)
        for key in data.keys():
            information = data[key]
            if [int(i) for i in data[key]] != [0, 0, 0]:
                # cat records land in dogList first; the dog loop below appends
                # after them, matching the 0-then-1 label vector built later
                dogList.append([int(i) for i in data[key]])
                count += 1
                follow.append(information[0])
                follower.append(information[1])
                group.append(information[2])
                followInfor[key] = [follow, follower, group]
followd = []
followerd = []
groupd = []
count = 0
for i in range(10):
path = r'raw data\dogfollow\followInfo_%s.pickle' % str(i)
with open(path, 'rb') as file:
data = pickle.load(file)
        for key in data.keys():
            information = data[key]
            if [int(i) for i in data[key]] != [0, 0, 0]:
                dogList.append([int(i) for i in data[key]])
                count += 1
                followd.append(information[0])
                followerd.append(information[1])
                groupd.append(information[2])
# print(count)
cat = [follow, follower, group]
dog = [followd, followerd, groupd]
N = ['follow', 'follower', 'group']
# print(cat[0])
# print(dog[0])
labels = np.zeros((1, 1945)).tolist()[0]
labels.extend(np.ones((1, 1946)).tolist()[0])
# 0 for cat and 1 for dog
dogList.extend(catList)
wholeList = dogList
#
# result = np.zeros((6, 10))
# for i in range(10):
# X_train, X_test, y_train, y_test = train_test_split(matrix_projected, labels, test_size=0.2, random_state=3)
# #
# # no significant find
# #
# lr = LogisticRegression()
# lr.fit(X_train, y_train)
# result[0][i] = lr.score(X_test, y_test)
# # print('logistic regression score: ', lr.score(X_test, y_test))
#
#
# clf = DecisionTreeClassifier()
# clf.fit(X_train, y_train)
# result[1][i] = clf.score(X_test, y_test)
# # print('ldecision tree score: ', clf.score(X_test, y_test))
#
# clf = SVC()
# clf.fit(X_train, y_train)
# result[2][i] = clf.score(X_test, y_test)
# # print('SVM score: ', clf.score(X_test, y_test))
#
# clf = GaussianNB()
# clf.fit(X_train, y_train)
# result[3][i] = clf.score(X_test, y_test)
# # print('Naive bayes score: ', clf.score(X_test, y_test))
#
# neigh = KNeighborsClassifier(n_neighbors=3)
# neigh.fit(X_train, y_train)
# result[4][i] = neigh.score(X_test, y_test)
# # print('k nearest neighbour score: ', neigh.score(X_test, y_test))
#
# clf = GradientBoostingClassifier()
# clf.fit(X_train, y_train)
# result[5][i] = clf.score(X_test, y_test)
# # print('boosting score: ', clf.score(X_test, y_test))
# # print(result)
#
# result = result.mean(axis=1)
# for item in result:
# print(item)
# fig = plt.figure()
# array, bins = np.histogram(np.array(cat[1]).astype('float'), bins='auto')
# plt.hist(array, bins)
# plt.xlim((0, 1))
# fig1 = plt.figure()
# arrayd, binsd = np.histogram(np.array(dog[1]).astype('float'), bins='auto')
# plt.hist(arrayd, binsd)
# plt.xlim((0, 1))
# plt.show()
# for i in range(3):
# for j in range(3):
# position = '33%s'%str(3*(i)+(j+1))
# ax = plt.subplot(int(position))
# # plt.scatter(cat[j], cat[i], c='r', alpha=0.1)
# plt.scatter(dog[j], dog[i], c='b', alpha=0.1)
# plt.xlabel(N[j])
# plt.ylabel(N[i])
# # plt.xlim((-10, 500))
# # plt.ylim((-10, 500))
# arrayc, binsc = np.histogram(np.array(cat[0]).astype('float'), bins='auto')
# arrayd, binsd = np.histogram(np.array(dog[0]).astype('float'), bins='auto')
leg = ['cat', 'dog']
#
# plt.hist([arrayc, arrayd], bins=binsd, label=leg)
# plt.legend(prop={'size': 10})
# plt.title("number of users' following")
# plt.xlim((-1, 50))
# plt.ylim((-1, 50))
fig2 = plt.figure()
arrayc, binsc = np.histogram(np.array(cat[2]).astype('int'), bins='auto')
arrayd, binsd = np.histogram(np.array(dog[2]).astype('int'), bins='auto')
plt.hist([np.array(cat[2]).astype('int'), np.array(dog[2]).astype('int')], binsc, label=leg)
plt.legend(prop={'size': 10})
plt.xlabel('number of groups')
print(arrayc)
print(arrayd)
plt.title("number of groups users participate")
plt.xlim((-1, 15))
plt.ylim((-1, 2000))
# plt.scatter(cat[0], cat[1], c='r', alpha=0.1)
# plt.scatter(dog[0], dog[1], c='b', alpha=0.1)
plt.show()
# fig3 = plt.figure()
# plt.scatter(group, follower)
# plt.ylim((0, 60))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3.6
# -*- coding:utf-8 -*-
__author__ = 'Lu ShaoAn'
__version__ = '1.0'
__date__ = '2021.05.13'
__copyright__ = 'Copyright 2021, PI'
import torch
res = torch.nn.functional.softmax(torch.tensor([13, 9, 9], dtype=torch.float32), dim=0)
print(res)
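# Cross-check against the definition softmax(x)_i = exp(x_i) / sum_j exp(x_j):
logits = torch.tensor([13, 9, 9], dtype=torch.float32)
manual = torch.exp(logits) / torch.exp(logits).sum()
print(manual)  # matches res up to floating-point rounding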
|
nilq/baby-python
|
python
|
from drf_yasg.utils import swagger_auto_schema
from product.models import Category, Ingredient, Pizza
from product.serializers import (CategorySerializer, IngredientSerializer,
PizzaSerializer)
from product.utils import resource_checker
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
############################## Endpoints of Pizzas #################################
class PizzasAPIView(APIView):
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(responses={200: PizzaSerializer(many=True)})
def get(self, request, format=None):
"""Return a list of Pizza objects"""
if request.user.is_superuser:
pizzas = Pizza.objects.all()
serializer = PizzaSerializer(pizzas, many=True)
return Response(serializer.data)
else:
pizzas = Pizza.objects.all().filter(is_active=True)
serializer = PizzaSerializer(pizzas, many=True)
return Response(serializer.data)
@swagger_auto_schema(responses={201: PizzaSerializer()})
def post(self, request, format=None):
"""Create a new pizza object"""
serializer = PizzaSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class PizzaDetailAPIView(APIView):
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(responses={200: PizzaSerializer()})
@resource_checker(Pizza)
def get(self, request, pk, format=None):
"""Get a pizza object by ID"""
pizza = Pizza.objects.get(pk=pk)
serializer = PizzaSerializer(pizza)
return Response(serializer.data)
@swagger_auto_schema(responses={200: PizzaSerializer()})
@resource_checker(Pizza)
def put(self, request, pk, format=None):
"""Update a pizza object"""
pizza = Pizza.objects.filter(id=pk).first()
serializer = PizzaSerializer(pizza, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    @swagger_auto_schema(responses={204: 'Pizza deleted successfully'})
@resource_checker(Pizza)
def delete(self, request, pk, format=None):
"""Delete a pizza object"""
pizza = Pizza.objects.filter(id=pk).first()
pizza.delete()
return Response(
{"message": f"Pizza '{pizza}' deleted succesfully"},
status=status.HTTP_204_NO_CONTENT
)
class DeleteIngredientPizza(APIView):
"""Delete an ingredient object of a pizza"""
permission_classes = (IsAuthenticated,)
    @swagger_auto_schema(responses={204: 'Ingredient of pizza deleted successfully'})
def delete(self, request, pizza_id, ingredient_id, format=None):
pizza = Pizza.objects.get(id=pizza_id)
ingredient = Ingredient.objects.get(id=ingredient_id)
pizza.ingredients.remove(ingredient)
return Response({
"message": f"Se ha removido el ingrediente {ingredient} de la pizza {pizza}"
}, status=status.HTTP_204_NO_CONTENT)
############################# Endpoints of Ingredients #############################
class IngredientsAPIView(APIView):
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(responses={200: IngredientSerializer(many=True)})
def get(self, request, format=None):
"""Return a list of Ingredient objects"""
ingredients = Ingredient.objects.all()
serializer = IngredientSerializer(ingredients, many=True)
return Response(serializer.data)
@swagger_auto_schema(responses={201: IngredientSerializer()})
def post(self, request, format=None):
"""Create a new Ingredient"""
serializer = IngredientSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class IngredientDetailAPIView(APIView):
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(responses={200: IngredientSerializer(many=True)})
@resource_checker(Ingredient)
def get(self, request, pk, format=None):
"""Get an ingredient object by ID"""
ingredient = Ingredient.objects.get(pk=pk)
serializer = IngredientSerializer(ingredient)
return Response(serializer.data)
@swagger_auto_schema(responses={200: IngredientSerializer()})
@resource_checker(Ingredient)
def put(self, request, pk, format=None):
"""Update an ingredient object"""
ingredient = Ingredient.objects.filter(id=pk).first()
serializer = IngredientSerializer(ingredient, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    @swagger_auto_schema(responses={204: 'Ingredient deleted successfully'})
@resource_checker(Ingredient)
def delete(self, request, pk, format=None):
"""Delete an ingredient object"""
ingredient = Ingredient.objects.filter(id=pk).first()
pizza_with_that_ingredient = Pizza.objects.filter(
ingredients=ingredient).exists()
# If there is a pizza associated with that ingredient, it cannot be deleted
if pizza_with_that_ingredient:
return Response(
{"message": "There is a pizza with that ingredient, it cannot be deleted"}
)
else:
ingredient.delete()
return Response(
{"message": f"Ingredient '{ingredient}' deleted succesfully"},
status=status.HTTP_204_NO_CONTENT
)
############################# Endpoints of Categories #############################
class CategoriesAPIView(APIView):
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(responses={200: CategorySerializer(many=True)})
def get(self, request, format=None):
"""Return a list of Category objects"""
categories = Category.objects.all()
serializer = CategorySerializer(categories, many=True)
return Response(serializer.data)
@swagger_auto_schema(responses={201: CategorySerializer()})
def post(self, request, format=None):
"""Create a new category object"""
serializer = CategorySerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CategoryDetailAPIView(APIView):
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(responses={200: CategorySerializer(many=True)})
@resource_checker(Category)
def get(self, request, pk, format=None):
"""Get a category object by ID"""
category = Category.objects.get(pk=pk)
serializer = CategorySerializer(category)
return Response(serializer.data)
@swagger_auto_schema(responses={200: CategorySerializer()})
@resource_checker(Category)
def put(self, request, pk, format=None):
"""Update a category object"""
category = Category.objects.filter(id=pk).first()
serializer = CategorySerializer(category, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    @swagger_auto_schema(responses={204: 'Category deleted successfully'})
@resource_checker(Category)
def delete(self, request, pk, format=None):
"""Delete a category object"""
category = Category.objects.filter(id=pk).first()
category.delete()
return Response(
{"message": f"Category '{category}' deleted succesfully"},
status=status.HTTP_204_NO_CONTENT
)
|
nilq/baby-python
|
python
|
import streamlit as st
import pandas as pd
import os
import math
st.set_page_config(
page_title="ID4D", layout="wide"
)
#st.write(os.listdir('.'))
#open('test.tmp','w').write('test')  # leftover write-permission probe
st.sidebar.write('The following app will help select which standards should be utilized as part of a foundational identity system.')
st.sidebar.write('The answers provided below will customise the standards list.')
#modalities=st.sidebar.select_slider("Population Size",['1-5M','5-50M','50-100M','100M+'])
apptype=st.sidebar.multiselect("Applications Required",['Foundational ID','Population Registry'])
modalities=st.sidebar.multiselect("Attributes",['Face','Fingerprint','Iris'])
if st.sidebar.checkbox('Require mobile applications',False):
modalities+=['Mobile']
show_link = st.sidebar.checkbox('Show link to Standard')
standards = pd.read_csv('standards.csv')
#df = pd.DataFrame({'a':[1,2,3,4],'b':[1,2,3,4]})
#st.write(modalities)
checked={}
if modalities and apptype:
with st.expander('Settings',True):
st.write(f'''
# Standards Requirements
The following are base level requirements that are recommended for a
foundational ID having attributes
*{', '.join(modalities)}*
''')
last_cat = ""
# modalities.extend(['All'])
standards=standards[standards['Modality'].isin(modalities+['All'])]
standards=standards.sort_values('Category')
for row in standards.itertuples():
        if isinstance(row.Standard, str):
if row.Category!=last_cat:
st.header(row.Category)
cols = st.columns(4)
checked[row.Standard]=cols[0].checkbox(row.Standard)
cols[1].write('**'+row.Standard+'**')
cols[2].write(row.Description)
if row.Modality!='All':
cols[2].write('Attribute :'+row.Modality)
if show_link:
cols[3].write(f"[Link]({row.Link})")
last_cat = row.Category
with st.expander('final'):
st.write(checked)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# jacobian column s0 s1 e0 e1 w0 w1 w2
# -----------------------------------------
# Imports
# -----------------------------------------
import pdb
import os # used to clear the screen
import math
from numpy import *
from numpy.linalg import *
import rospy
import baxter_interface
from baxter_interface import CHECK_VERSION
from baxter_pykdl import baxter_kinematics
from rbx1_nav.transform_utils import quat_to_angle # Convert quaternions to euler
import geometry_msgs
import baxter_core_msgs.msg
import PyKDL
from std_msgs.msg import Float32,ColorRGBA
import dynamic_reconfigure.client
from std_msgs.msg import Empty
import copy
# -----------------------------------------
# Local Methods
# -----------------------------------------
class torque(object):
def __init__(self):
print 'initial'
self.enable_Baxter()
self.jNamesR = ['right_s0', 'right_s1', 'right_w0', 'right_w1', 'right_w2', 'right_e0', 'right_e1']
self.rLimb = baxter_interface.limb.Limb("right")
self.rKin = baxter_kinematics('right')
self.pub2 = rospy.Publisher('/baxter/error', ColorRGBA, queue_size=10)
# Call routine to enable the robot
# position PID
self._E_pre_position=matrix([[0],[0],[0]])
self._E_all_position=matrix([[0],[0],[0]])
self.refposition=matrix([[0.7],[-0.5],[-0.14]])
# force PID
self._E_pre_force=matrix([[0],[0],[0],[0],[0],[0]])
self._E_all_force=matrix([[0],[0],[0],[0],[0],[0]])
self.force_torque=matrix([[0],[0],[0],[0],[0],[0],[0]])
self.refforce=matrix([[-0.03],[0],[0],[0],[0],[0]])
# keep static PID 0.01 0 0.1
self._E_pre=matrix([[0],[0],[0],[0],[0],[0],[0]])
self._E_all=matrix([[0],[0],[0],[0],[0],[0],[0]])
# self.static_torque=matrix([[0],[0],[0],[0],[0],[0],[0]])
# [-0.5522330830078125, 0.21667478604125978, -0.03413107249145508, 1.4714710690979005, -1.699267215838623, -0.14726215546875002, 1.4450099005371095]
self.test_msg=ColorRGBA()
self.count = 0
self.initial_position()
side = 'right'
print("Suppressing Gravity compensation for the {} arm...".format(side))
gravity_topic='/robot/limb/{}/suppress_gravity_compensation'.format(side)
self.gravity_pub = rospy.Publisher(gravity_topic, Empty, queue_size=10)
self.gravity_msg = Empty()
start = rospy.Time.now().to_sec()
rospy.Timer(rospy.Duration(0.00125), self.suppress_gravity_compensation)
self.refvel = matrix([[0],[0],[0],[0],[0],[0],[0]])
self.static_torque = matrix([[0.001],[0.001],[0.001],[0.001],[0.001],[0.001],[0.001]])
self.rLimb.set_command_timeout(0.00125)
self.sub = rospy.Subscriber("/robot/limb/right/gravity_compensation_torques", baxter_core_msgs.msg.SEAJointState, self.get_static_torque)
# note
def initial_position(self):
print "initial position"
Rposition = matrix([[-1],[0.217],[-0.034],[1.471],[-1.699],[-0.147],[1.445]])
Rposition_dict=dict(zip(self.jNamesR,self.change_order(Rposition.tolist())))
self.rLimb.move_to_joint_positions(Rposition_dict)
def suppress_gravity_compensation(self,event):
self.gravity_pub.publish(self.gravity_msg)
"""
print self.count
static_torque = matrix([[-1],[0],[0],[0],[0],[0],[0]])
print static_torque
static_torque_dict=dict(zip(self.jNamesR,self.change_order(static_torque.tolist())))
print static_torque_dict
self.rLimb.set_joint_torques(static_torque_dict)
self.count = self.count + 1
"""
def enable_Baxter(self):
# Enable the robot's arms
print("Getting robot state...")
self.rs = baxter_interface.RobotEnable(CHECK_VERSION)
self.init_state=self.rs.state().enabled
print("Enabling robot...")
self.rs.enable()
def change_order(self, s0s1e0e1w0w1w2):
return [s0s1e0e1w0w1w2[0][0],s0s1e0e1w0w1w2[1][0],s0s1e0e1w0w1w2[4][0],s0s1e0e1w0w1w2[5][0],s0s1e0e1w0w1w2[6][0],s0s1e0e1w0w1w2[2][0],s0s1e0e1w0w1w2[3][0]]
def get_position_vel(self):
self._postion_vel_kp=rospy.get_param('/dynamic_pid_tutorials/p_param_position')
self._postion_vel_ki=rospy.get_param('/dynamic_pid_tutorials/i_param_position')
self._postion_vel_kd=rospy.get_param('/dynamic_pid_tutorials/d_param_position')
print 'position_kp', self._postion_vel_kp, 'position_ki', self._postion_vel_ki, 'position_kd', self._postion_vel_kd
actual_position = matrix([\
self.rLimb.endpoint_pose()['position'].x, self.rLimb.endpoint_pose()['position'].y, self.rLimb.endpoint_pose()['position'].z\
]).T
E_position = self.refposition - actual_position
print 'Error_position is' ,E_position
self._E_all_position = self._E_all_position + E_position
position_vel = pinv(self.rKin.jacobian()[0:3,0:7]) * (self._postion_vel_kp * E_position + self._postion_vel_ki * self._E_all_position\
+ self._postion_vel_kd * ( E_position - self._E_pre_position))
self._E_pre_position = E_position
return position_vel
def get_force_torque(self):
self._force_kp=rospy.get_param('/dynamic_pid_tutorials/p_param_force')
self._force_ki=rospy.get_param('/dynamic_pid_tutorials/i_param_force')
self._force_kd=rospy.get_param('/dynamic_pid_tutorials/d_param_force')
print 'force_kp', self._force_kp, 'force_ki', self._force_ki, 'force_kd', self._force_kd
actual_force = matrix([\
self.rLimb.endpoint_effort()['force'].x, self.rLimb.endpoint_effort()['force'].y, self.rLimb.endpoint_effort()['force'].z,\
self.rLimb.endpoint_effort()['torque'].x, self.rLimb.endpoint_effort()['torque'].y, self.rLimb.endpoint_effort()['torque'].z\
]).T
ddotE = self.refforce - actual_force
print 'Error_effort is' , ddotE
cartesian_inertia = self.rKin.cart_inertia()
self._E_all_force = self._E_all_force + ddotE
self.force_torque =self.force_torque + self.rKin.jacobian_transpose() * cartesian_inertia * \
(self._force_kp * ddotE + self._force_ki * self._E_all_force\
+ self._force_kd *(ddotE - self._E_pre_force))
self._E_pre_force = ddotE
return self.force_torque
def get_static_torque(self , SEAJointState):
# os.system('clear')
actual_effort = matrix(SEAJointState.actual_effort).T
actual_position = matrix(SEAJointState.actual_position).T
actual_velocity = matrix(SEAJointState.actual_velocity).T
ref_effort = matrix([[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0]])
ref_vel = matrix([[0.1],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0]])
_E_now = ref_vel - actual_velocity
self._E_all = self._E_all + _E_now
self._kp=[60,1,0.1,0.1,0.2,1.5,0.1]
self._ki=[0,0.0,0.0,0.0,0.0,0.0,0.0]
self._kd=[0.2,0.0,0.0,0.0,0.0,0.0,0.0]
for i in range (0,7):
self.static_torque[i] = 0.0
for i in range (0,1):
self.static_torque[i] = self.static_torque[i] + self._kp[i] * _E_now[i] + self._kd[i] * (_E_now[i] -self._E_pre[i]) + self._ki[i] * self._E_all[i]
self._E_pre = _E_now
# output
static_torque_dict=dict(zip(self.jNamesR,self.change_order(self.static_torque.tolist())))
print static_torque_dict
self.rLimb.set_joint_torques(static_torque_dict)
self.count = self.count + 1
print self.count
def static_position_control(self, SEAJointState):
os.system('clear')
self._kp=rospy.get_param('/dynamic_pid_tutorials/p_param_static')
self._ki=rospy.get_param('/dynamic_pid_tutorials/i_param_static')
self._kd=rospy.get_param('/dynamic_pid_tutorials/d_param_static')
print 'static kp', self._kp, 'static ki', self._ki, 'static kd', self._kd
actual_velocity = matrix(SEAJointState.actual_velocity).T
# get the design velocity
position_vel = self.get_position_vel()
_E_now = position_vel - actual_velocity
self._E_all = self._E_all + _E_now
self.static_torque = self.static_torque + self._kp * _E_now + self._kd * (_E_now -self._E_pre) + self._ki * self._E_all
self._E_pre = _E_now
static_torque_dict=dict(zip(self.jNamesR,self.change_order(self.static_torque.tolist())))
self.rLimb.set_joint_torques(static_torque_dict)
self.count = self.count + 1
print self.count
def static_position_force_control(self, SEAJointState):
self._kp=rospy.get_param('/dynamic_pid_tutorials/p_param_static')
self._ki=rospy.get_param('/dynamic_pid_tutorials/i_param_static')
self._kd=rospy.get_param('/dynamic_pid_tutorials/d_param_static')
print 'static kp', self._kp, 'static ki', self._ki, 'static kd', self._kd
self.actual_velocity = matrix(SEAJointState.actual_velocity).T
        # incremental PID
position_vel = self.get_position_vel()
_E_now = position_vel - self.actual_velocity
self._E_all = self._E_all + _E_now
self.static_torque = self.static_torque + self._kp * _E_now + self._kd * (_E_now -self._E_pre) + self._ki * self._E_all
self._E_pre = _E_now
force_torque = self.get_force_torque()
s_p_f_torque = self.static_torque + force_torque
static_torque_dict=dict(zip(self.jNamesR,self.change_order(s_p_f_torque.tolist())))
self.rLimb.set_joint_torques(static_torque_dict)
self.count = self.count + 1
print self.count
def static_force_control(self, SEAJointState):
self._kp=rospy.get_param('/dynamic_pid_tutorials/p_param_static')
self._ki=rospy.get_param('/dynamic_pid_tutorials/i_param_static')
self._kd=rospy.get_param('/dynamic_pid_tutorials/d_param_static')
print 'static kp', self._kp, 'static ki', self._ki, 'static kd', self._kd
self.actual_velocity = matrix(SEAJointState.actual_velocity).T
        # incremental PID
_E_now = - self.actual_velocity
self._E_all = self._E_all + _E_now
self.static_torque = self.static_torque + self._kp * _E_now + self._kd * (_E_now -self._E_pre) + self._ki * self._E_all
self._E_pre = _E_now
force_torque = self.get_force_torque()
s_f_torque = self.static_torque + force_torque
static_torque_dict=dict(zip(self.jNamesR,self.change_order(s_f_torque.tolist())))
self.rLimb.set_joint_torques(static_torque_dict)
self.count = self.count + 1
print self.count
def main():
# Initialize node
rospy.init_node('torque_control_static_')
print 'start'
settorque=torque()
rospy.spin()
if __name__ == "__main__":
try:
# pdb.set_trace()
main()
except:
rospy.loginfo("example_baxter_kins_right node terminated.")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('myapp', '0005_imagedetails'),
]
operations = [
migrations.CreateModel(
name='GoodsList',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('title', models.CharField(verbose_name='标题', max_length=80, null=True)),
('add_time', models.DateTimeField(verbose_name='添加时间', auto_now_add=True)),
('summary', models.CharField(verbose_name='摘要', max_length=200)),
('click_times', models.IntegerField(verbose_name='点击次数')),
('img_src', models.CharField(verbose_name='图片url', max_length=80)),
('sell_price', models.FloatField()),
('market_price', models.FloatField()),
('stock_quantity', models.IntegerField()),
],
),
]
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
__author__ = "blueShard (ByteDream)"
__license__ = "MPL-2.0"
__version__ = "1.1"
# Startup script that checks whether python3, pip3 and all required external python3 libraries are installed (installing whatever is missing) and then starts main.py:
"""
#!/bin/bash
which python3 &> /dev/null
[ $? -eq 0 ] || apt-get -y install python3
which pip3 &> /dev/null
[ $? -eq 0 ] || apt-get -y install python3-pip
python3 -c "import aiogram" &> /dev/null
[ $? -eq 0 ] || yes | pip3 install aiogram 1> /dev/null
python3 -c "import discord" &> /dev/null
[ $? -eq 0 ] || yes | pip3 install discord 1> /dev/null
python3 -c "import webuntis" &> /dev/null
[ $? -eq 0 ] || yes | pip3 install webuntis 1> /dev/null
python3 main.py <discord api token> <telegram api token> <webuntis username> <webuntis password>
"""
import asyncio
import discord # https://github.com/Rapptz/discord.py
import logging
import xml.etree.ElementTree as ET
from aiogram import Bot, Dispatcher, types # https://github.com/aiogram/aiogram
from datetime import date, datetime, time, timedelta
from math import ceil
from random import choice
from sys import argv
from traceback import format_exc
from webuntis import Session # https://github.com/python-webuntis/python-webuntis
from xml.dom import minidom
# logging.basicConfig(format="[%(asctime)s] %(levelname)s: %(message)s", level=logging.INFO)
logging.basicConfig(handlers=[logging.StreamHandler(), logging.FileHandler("/var/log/ScheduleAndMoreBot.log", "a+")], format="[%(asctime)s] %(levelname)s: %(message)s", level=logging.INFO)
logging.info("Start logging")
class ScheduleAnMoreBot(discord.Client):
telegram_bot = Bot(token=argv[2])
dispatcher = Dispatcher(telegram_bot)
def __init__(self, ignore_discord: bool = False, **options) -> None:
super().__init__(**options)
self.ignore_discord = ignore_discord
self.info_file = "infos.txt"
self.discord_utils = DiscordUtils()
self.discord_channel = None
self.telegram_utils = TelegramUtils()
self.dispatcher.register_message_handler(self.telegram_private, commands=["private"])
self.dispatcher.register_message_handler(self.telegram_example, commands=["example"])
self.dispatcher.register_message_handler(self.telegram_help, commands=["help"])
self.dispatcher.register_message_handler(self.telegram_add_info, commands=["add_info"])
self.dispatcher.register_message_handler(self.telegram_info, commands=["info"])
self.dispatcher.register_message_handler(self.telegram_source, commands=["src", "source"])
if self.ignore_discord:
loop = asyncio.get_event_loop()
loop.create_task(self.dispatcher.start_polling(self.dispatcher))
loop.create_task(Checker(None, self.telegram_bot, self.telegram_utils.group_id).main())
# ----- Discord ----- #
async def on_ready(self) -> None:
logging.info("Connected to Discord server")
async def on_message(self, message: discord.Message) -> None:
user_input = message.content.lower().strip()
if not user_input.startswith("$"):
return
elif self.discord_channel is None:
if message.content.lower().strip() == "$start" and message.channel.id == self.discord_utils.channel_id:
self.discord_channel = message.channel
await message.channel.send("Der Bot wurde aktiviert")
if not self.ignore_discord:
loop = asyncio.get_event_loop()
loop.create_task(self.dispatcher.start_polling(self.dispatcher))
loop.create_task(Checker(self.discord_channel, self.telegram_bot, self.telegram_utils.group_id).main())
else:
await message.channel.send("Tippe '$start' im richtigen Channel um den Bot zu aktivieren")
else:
user_input = user_input[1:]
            # a switch-case would be nice here :p
if user_input == "help":
await self.discord_help(message)
elif user_input == "example":
await self.discord_example(message)
elif user_input.startswith("add_info"):
await self.discord_add_info(message)
elif user_input == "info":
await self.discord_info(message)
elif user_input in ["src", "source"]:
await self.discord_source(message)
else:
await message.channel.send("Tippe '$help' für Hilfe")
async def discord_help(self, message: discord.Message) -> None:
"""Zeigt alle Discord Befehle + Information was diese tuhen an"""
if self.discord_utils.is_valid_channel(message.channel) or self.discord_utils.is_valid_user(self.discord_channel, message.author):
await message.channel.send(self.discord_utils.help())
else:
await message.channel.send(self.discord_utils.not_permitted())
async def discord_example(self, message: discord.Message) -> None:
"""Zeigt Beispiele zu allen Discord Befehlen, wie man diese nutzt"""
if self.discord_utils.is_valid_channel(message.channel) or self.discord_utils.is_valid_user(self.discord_channel, message.author):
await message.channel.send(self.discord_utils.example())
else:
await message.channel.send(self.discord_utils.not_permitted())
async def discord_add_info(self, message: discord.Message) -> None:
"""Fügt eine neue Info hinzu"""
if self.discord_utils.is_valid_channel(message.channel) or self.discord_utils.is_valid_user(self.discord_channel, message.author):
command_no_space = message.content.replace(" ", "")
infos = Infos()
full_date = datetime.today()
            today = datetime(full_date.year, full_date.month, full_date.day)  # the exact time of day is omitted here so that entries can still be created for the current day
date_for_info = command_no_space[9:19].split("-")
for index, x in enumerate(date_for_info):
if x.startswith("0"):
date_for_info[index] = x[1:]
try:
if today > datetime(int(date_for_info[2]), int(date_for_info[1]), int(date_for_info[0])):
await message.channel.send("Das Datum liegt in der Vergangenheit")
return
else:
date = command_no_space[9:19]
information = message.content.replace("$add_info", "", 1).replace(command_no_space[9:19], "", 1).strip()
infos.addappend(date, information)
for embed in self.discord_utils.embed_info(date, information):
await self.discord_channel.send(embed=embed)
await self.telegram_bot.send_message(self.telegram_utils.group_id, "Eine neue Info für " + date + " wurde hinzugefügt: " + information)
logging.info("New entry for date " + date + " was added: " + information)
except (IndexError, SyntaxError, ValueError):
await message.channel.send("Es wurde kein richtiges Datum angegeben")
logging.warning("An error occurred while trying to add a new information:\n" + format_exc())
async def discord_info(self, message: discord.Message) -> None:
"""Zeigt alle Infos an"""
if self.discord_utils.is_valid_channel(message.channel) or self.discord_utils.is_valid_user(self.discord_channel, message.author):
infos = Infos()
all_colors = [discord.Color.blue(),
discord.Color.blurple(),
discord.Color.dark_blue(),
discord.Color.dark_gold(),
discord.Color.darker_grey(),
discord.Color.dark_green(),
discord.Color.dark_grey(),
discord.Color.dark_magenta(),
discord.Color.dark_orange(),
discord.Color.dark_purple(),
discord.Color.dark_red(),
discord.Color.dark_teal(),
discord.Color.default()]
choosed_colors = []
for child in infos.root:
info = infos.get(child.tag)
separator = info.split("~", 1)[0]
day_infos = info.replace("~", "", 1).split(separator)[1:]
if len(choosed_colors) >= len(all_colors):
choosed_colors = []
color = choice(all_colors)
while color in choosed_colors:
color = choice(all_colors)
discord_info = discord.Embed(title="Infos für " + child.tag[1:], color=color)
# discord_info.set_image(url="https://upload.wikimedia.org/wikipedia/commons/thumb/e/e4/Infobox_info_icon.svg/2000px-Infobox_info_icon.svg.png")
discord_info.set_thumbnail(url="https://upload.wikimedia.org/wikipedia/commons/thumb/e/e4/Infobox_info_icon.svg/2000px-Infobox_info_icon.svg.png")
for index, day_info in enumerate(day_infos):
if len(day_info) > 1000:
for x in range(0, ceil(len(day_info) / 1000)):
                            if x and x % 6 == 0:
await message.channel.send(embed=discord_info)
discord_info.clear_fields()
discord_info.add_field(name=str(index + 1) + "/" + str(x), value=day_info[x * 1000:(x + 1) * 1000], inline=False)
else:
discord_info.add_field(name=str(index + 1), value=day_info, inline=False)
await message.channel.send(embed=discord_info)
async def discord_source(self, message: discord.Message) -> None:
"""Stellt den Source Code zu Verfügung"""
await message.channel.send(file=discord.File("main.py", filename="main.py"))
# ----- Telegram ----- #
async def telegram_private(self, message: types.Message) -> None:
"""Fügt einen Telegram Nutzer zur liste hinzu, damit dieser per DM mit dem Bot interagieren"""
if self.telegram_utils.is_valid_group(message.chat):
if not self.telegram_utils.is_private_user(message.from_user):
user_id = message.from_user.id
self.telegram_utils.private_users_id.append(user_id)
with open(self.telegram_utils.private_users_file, "a+") as file:
file.write(str(user_id) + ";")
await message.answer("Neuer Nutzer wurde eingetragen")
logging.info("New private telegram user registered")
else:
await message.answer(self.telegram_utils.not_permitted())
async def telegram_help(self, message: types.Message) -> None:
"""Zeigt alle Telegram Befehle + Information was diese tuhen an"""
if self.telegram_utils.is_valid_group(message.chat) or self.telegram_utils.is_private_user(message.from_user):
await message.answer(self.telegram_utils.help(), parse_mode="MarkdownV2")
else:
await message.answer(self.telegram_utils.not_permitted())
async def telegram_example(self, message: types.Message) -> None:
"""Zeigt Beispiele zu allen Telegram Befehlen, wie man diese nutzt"""
if self.telegram_utils.is_valid_group(message.chat) or self.telegram_utils.is_private_user(message.from_user):
await message.answer(self.telegram_utils.example(), parse_mode="MarkdownV2")
else:
await message.answer(self.telegram_utils.not_permitted())
async def telegram_add_info(self, message: types.Message) -> None:
"""Fügt eine neue Info hinzu"""
if self.telegram_utils.is_valid_group(message.chat) or self.telegram_utils.is_private_user(message.from_user):
infos = Infos()
message_no_space = message.text.replace(" ", "")
full_date = datetime.today()
today = datetime(full_date.year, full_date.month, full_date.day)  # the exact time of day is dropped so that entries can still be created for the same day
date_for_info = message_no_space[9:19].split("-")
for index, x in enumerate(date_for_info):
if x.startswith("0"):
date_for_info[index] = x[1:]
try:
if today > datetime(int(date_for_info[2]), int(date_for_info[1]), int(date_for_info[0])):
await message.answer("Das Datum liegt in der Vergangenheit")
return
else:
date = message_no_space[9:19]
information = message.text.replace("/add_info", "", 1).replace(date, "", 1).strip()
infos.addappend(date, information)
await self.telegram_bot.send_message(self.telegram_utils.group_id, "Eine neue Info für " + date + " wurde hinzugefügt: " + information)
for embed in self.discord_utils.embed_info(date, information):
await self.discord_channel.send(embed=embed)
logging.info("New entry for date " + date + " was added: " + information)
except (IndexError, SyntaxError, ValueError):
await message.answer("Es wurde kein richtiges Datum angegeben")
else:
await message.answer(self.telegram_utils.not_permitted())
async def telegram_info(self, message: types.Message) -> None:
"""Zeigt alle Infos an"""
if self.telegram_utils.is_valid_group(message.chat) or self.telegram_utils.is_private_user(message.from_user):
infos = Infos()
information = ""
for child in infos.root:
info = infos.get(child.tag)
info = info.replace(info.split("~", 1)[0], "\n\n")  # str.replace returns a new string; the result must be assigned
information = information + child.tag[1:] + ": " + info.split("~", 1)[1]
await message.answer(information)
information = ""
else:
await message.answer(self.telegram_utils.not_permitted())
async def telegram_source(self, message: types.Message) -> None:
"""Stellt den Source Code zu Verfügung"""
if self.telegram_utils.is_valid_group(message.chat) or self.telegram_utils.is_private_user(message.from_user):
await message.answer_document(document=open("main.py", "rb"))
else:
await message.answer(self.telegram_utils.not_permitted())
class DiscordUtils:
def __init__(self) -> None:
self.channel_id = 746369803941576784
# Test: 746477001237594174
def embed_info(self, date, info) -> list:
"""Erstellt discord embeds für die gegeben info"""
return_list = []
all_colors = [discord.Color.blue(),
discord.Color.blurple(),
discord.Color.dark_blue(),
discord.Color.dark_gold(),
discord.Color.darker_grey(),
discord.Color.dark_green(),
discord.Color.dark_grey(),
discord.Color.dark_magenta(),
discord.Color.dark_orange(),
discord.Color.dark_purple(),
discord.Color.dark_red(),
discord.Color.dark_teal(),
discord.Color.default()]
color = choice(all_colors)  # all_colors is rebuilt on every call, so a plain random choice suffices here
discord_info = discord.Embed(title="Eine neue Info für " + date + " wurde hinzugefügt", color=color)
# discord_info.set_image(url="https://upload.wikimedia.org/wikipedia/commons/thumb/e/e4/Infobox_info_icon.svg/2000px-Infobox_info_icon.svg.png")
discord_info.set_thumbnail(url="https://upload.wikimedia.org/wikipedia/commons/thumb/e/e4/Infobox_info_icon.svg/2000px-Infobox_info_icon.svg.png")
if len(info) > 1000:
for x in range(0, ceil(len(info) / 1000)):
if x % 6 == 0 and x != 0:  # flush every 6 fields; 6 x 1000 characters reaches the embed size limit
return_list.append(discord_info)
discord_info.clear_fields()
discord_info.add_field(name="Info" + "/" + str(x), value=info[x * 1000:(x + 1) * 1000], inline=False)
else:
discord_info.add_field(name="Info", value=info, inline=False)
return_list.append(discord_info)
return return_list
def example(self) -> str:
"""Discord Text, der Beispiele zu allen Befehlen zeigt"""
example_text = "```\n" \
"$start $start\n" \
"$help $help\n" \
"$example $example\n" \
"$add_info [dd-mm-yyyy] [info] $add_info 01-01-2222 Eine einfache test Info\n" \
"$info $info\n" \
"$src / $source $src\n" \
"```"
return example_text
def help(self) -> str:
"""Discord Text, der Hilfe zu allen Befehlen zeigt"""
help_text = "```\n" \
"DM (direct message) = Nur per Direktnachticht ausführbar\n" \
"SC (source channel) = Nur vom Channel von dem aus der Bot gestartet wurde ausführbar\n" \
"EV (everywhere) = Von überall ausführbar\n\n" \
"Befehlsname Von wo ausführbar Beschreibung\n\n" \
"$start SC Startet den Bot\n\n" \
"$help EV Zeigt Hilfe zu den vorhanden Befehlen an\n" \
"$example EV Zeigt beispiele für jeden Befehl\n" \
"$add_info [dd-mm-yyyy] [info] EV Fügt neue Informationen zu einem bestimmten Tag hinzu\n" \
"$info EV Gibt eingetragene infos wieder\n" \
"$src / $source EV Stellt die Datei mit dem Quellcode zu Verfügung\n" \
"```"
return help_text
def is_valid_channel(self, channel: discord.TextChannel) -> bool:
"""Checkt, ob der gegebene Channel der Channel ist, auf dem der Bot aktiv sein soll"""
try:
return channel.id == self.channel_id
except AttributeError:
return False
def is_valid_user(self, channel: discord.TextChannel, user: discord.User) -> bool:
"""Überprüft, ob der Nutzer auf dem Discord Server Mitglied ist"""
print(user.id, channel.members)
try:
for member in channel.members:
if user.id == member.id:
return True
except AttributeError:
logging.warning("Attribute error occurred while trying to check if discord user is valid")
return False
def not_permitted(self) -> str:
"""Info, wenn eine nicht berechtigte Person einen Discord Befehl ausführt"""
return "Nur Personen, die Mitglieder auf dem Discord Server sind, haben Zugriff auf die Befehle"
class TelegramUtils:
def __init__(self) -> None:
self.group_id = -384078711
self.private_users_file = "private_users.scv"
with open(self.private_users_file, "r+") as file:
self.private_users_id = file.readline().split(";")
def example(self) -> str:
"""Telegram Text, der Beispiele zu allen Befehlen zeigt"""
example_text = "```\n" \
"/start\n" \
"/start\n\n" \
"/help\n" \
"/help\n\n" \
"/example\n" \
"/example\n\n" \
"/add_info [dd-mm-yyyy] [info]\n" \
"/add_info 01-01-2222 Eine einfache test Info\n\n" \
"/info\n" \
"/info\n\n" \
"/src or /source\n" \
"/src\n" \
"```"
return example_text
def help(self) -> str:
"""Discord Text, der Hilfe zu allen Befehlen zeigt"""
help_text = "```\n" \
"DM (direct message) = Nur per Direktnachticht ausführbar\n" \
"GR (group) = Nur vom Channel von dem aus der Bot gestartet wurde ausführbar\n" \
"EV (everywhere) = Von überall ausführbar\n\n" \
"/private\n" \
"GR\n" \
"Nutzer bekommt Zugriff auf Befehle, die per DM ausgeführt werden können\n\n\n" \
"/help\n" \
"EV\n" \
"Zeigt Hilfe zu den vorhanden Befehlen an\n\n" \
"/example\n" \
"EV\n" \
"Zeigt Hilfe zu den vorhanden Befehlen an\n\n" \
"/add_info [dd-mm-yyyy] [info]\n" \
"EV\n" \
"Fügt neue Informationen zu einem bestimmten Tag hinzu\n\n" \
"/info\n" \
"EV\n" \
"Gibt eingetragene Infos wieder\n\n\n" \
"/src or /source\n" \
"EV\n" \
"Stellt die Datei mit dem Quellcode zu Verfügung\n" \
"```"
return help_text
def is_private_user(self, user: types.User) -> bool:
"""Überprüft, ob der Nutzer '/private' in der Gruppe eingegeben hat"""
return str(user.id) in self.private_users_id
def is_valid_group(self, chat: types.Chat) -> bool:
"""Checkt, ob die gegeben Gruppe die Gruppe ist, worin der Bot aktiv sein soll"""
if chat.id == self.group_id:
return True
else:
return False
def not_permitted(self) -> str:
"""Info, wenn eine nicht berechtigte Person einen Telegram Befehl ausführt"""
return "Gebe '/private' in der Gruppe ein, um Zugriff auf Befehle, die per DM ausgeführt werden können, zu erhalten"
class Infos:  # may eventually be replaced by a lua program
def __init__(self, info_file: str = "infos.xml") -> None:
self.info_file = info_file
self.root = ET.fromstring("".join(line.strip() for line in open(info_file, "r")))
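# infos.xml holds a flat tree of entries whose tags are the date prefixed with "_" (XML tags must not start with a digit), e.g. <_01-01-2222 separator="^|^">text</_01-01-2222>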
def __create_separator(self, text: str) -> str:
"""Erstellt ein separator"""
indicator = "^|^"
choices = ("§", "!", "^")
while True:
if indicator in text:
list_choice = choice(choices)
splitted_indicator = indicator.split("|")
indicator = splitted_indicator[0] + list_choice + "|" + list_choice + splitted_indicator[1]
else:
return indicator
def _prettify(self, string: str = None) -> str:
"""Macht den XML Tree lesbarer für Menschis^^"""
if string is None:
reparsed = minidom.parseString(ET.tostring(self.root, "utf-8"))
else:
reparsed = minidom.parseString(string)
pre_output = reparsed.toprettyxml(indent=" ")
return "\n".join(pre_output.split("\n")[1:])
def addappend(self, date_: str, text: str) -> None:
"""Fügt einen neuen Eintrag zum gegebenen Datum hinzu"""
date_ = "_" + date_
for child in self.root:
if child.tag == date_:
child_text = child.text
old_separator = child.attrib["separator"]
new_separator = self.__create_separator(child_text + text)
child.text = child.text.replace(old_separator, new_separator) + new_separator + text
child.attrib["separator"] = new_separator
self.write()
return
new_entry = ET.Element(date_)
new_entry.text = text
new_entry.attrib["separator"] = self.__create_separator(text)
self.root.append(new_entry)
self.write()
def delete(self, date_: str) -> None:
"""Löscht alle Einträge an dem gegeben Datum"""
for child in self.root:
if child.tag == date_:
self.root.remove(child)
self.write()
return
def get(self, date_: str) -> str:
"""Gibt alle Einträge an dem gegeben Datum zurück"""
for child in self.root:
if child.tag == date_:
return child.attrib["separator"] + "~" + child.text
return ""
def write(self) -> None:
"""Schreibt den XML Tree in die Datei"""
with open(self.info_file, "w+") as file:
file.write(self._prettify())
file.close()
class Checker:
def __init__(self, discord_channel: discord.TextChannel, telegram_bot: Bot, telegram_group_id: int):
self.discord_channel = discord_channel
self.telegram_bot = telegram_bot
self.telegram_group_id = telegram_group_id
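# lesson number -> [start, end] times of the school periods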
self.lessons = {"1": [time(8, 0,), time(8, 45)],
"2": [time(8, 45), time(9, 30)],
"3": [time(9, 45), time(10, 30)],
"4": [time(10, 30), time(11, 15)],
"5": [time(11, 45), time(12, 30)],
"6": [time(12, 30), time(13, 15)],
"7": [time(13, 30), time(14, 15)],
"8": [time(14, 15), time(15, 0)]}
self.all_cancelled_lessons_thursday = {}
self.all_ignored_lessons_thursday = {}
self.which_thursday = date.today()
self.all_cancelled_lessons_friday = {}
self.all_ignored_lessons_friday = {}
self.which_friday = date.today()
self.session: Session = None
async def __check_and_send_cancelled_lessons(self, date_to_check: date) -> None:  # this method is somewhat hard to read
"""Checks whether lessons were cancelled / rescheduled and reports the result (if any) to Discord and Telegram"""
try:
embed = None
all_embed_fields = {}
all_telegram_messages = {}
telegram_message = ""
if date_to_check.weekday() == 3:
already_cancelled_lessons: dict = self.all_cancelled_lessons_thursday
all_ignored_lessons: dict = self.all_ignored_lessons_thursday
weekday_in_german = "Donnerstag"
elif date_to_check.weekday() == 4:
already_cancelled_lessons: dict = self.all_cancelled_lessons_friday
all_ignored_lessons: dict = self.all_ignored_lessons_friday
weekday_in_german = "Freitag"
else:
raise ValueError('date_to_check (datetime.date) must be thursday or friday')
timetable = self.session.timetable(start=date_to_check, end=date_to_check, klasse=2015)
for lesson in timetable:
lesson_number = str(lesson.start.time().strftime("%H:%M")) + " Uhr - " + str(lesson.end.time().strftime("%H:%M") + " Uhr")
for lesson_num, lesson_time in self.lessons.items():
if lesson_time[0] == lesson.start.time():
lesson_number = lesson_num
break
embed_title = "Stunden Ausfall Information für " + weekday_in_german + ", den " + date_to_check.strftime("%d.%m.%Y")
if lesson.code == "irregular" and lesson_number not in all_ignored_lessons.keys() and lesson.teachers not in all_ignored_lessons.values():
embed = discord.Embed(title=embed_title, color=discord.Color.from_rgb(77, 255, 77))
for lesson1 in timetable:
if lesson.teachers == lesson1.teachers and lesson.start is not lesson1.start and lesson1.code == "cancelled":
lesson1_number = str(lesson1.start.time().strftime("%H:%M")) + " Uhr - " + str(lesson1.end.time().strftime("%H:%M") + " Uhr")
for lesson_num, lesson_time in self.lessons.items():
if lesson_time[0] == lesson1.start.time():
lesson1_number = lesson_num
break
for number in list(all_embed_fields.keys()):  # without list() the dict would change size during iteration and raise a RuntimeError
if number in [lesson_number, lesson1_number]:
del all_embed_fields[number]
del all_telegram_messages[number]
if len(lesson1_number) == 1:
all_embed_fields[lesson_number] = {lesson1_number + ". Stunde wurde zur " + lesson_number +
". Stunde umverlegt": "Die " + lesson1_number + ". Stunde (" + lesson1.start.time().strftime("%H:%M") + " Uhr - " + lesson1.end.time().strftime("%H:%M") + " Uhr) bei " + \
", ".join([teacher.long_name for teacher in lesson.teachers]) + " wurde zur " + lesson_number + ". Stunde (" + lesson.start.time().strftime("%H:%M") + \
" Uhr - " + lesson.end.time().strftime("%H:%M") + " Uhr) umverlegt"}
all_telegram_messages[lesson_number] = "Die " + lesson1_number + ". Stunde (" + lesson1.start.time().strftime("%H:%M") + " Uhr - " + lesson1.end.time().strftime("%H:%M") + " Uhr) bei " + \
", ".join([teacher.long_name for teacher in lesson.teachers]) + " wurde zur " + lesson_number + ". Stunde (" + lesson.start.time().strftime("%H:%M") + \
" Uhr - " + lesson.end.time().strftime("%H:%M") + " Uhr) umverlegt"
else:
all_embed_fields[lesson_number] = {"Die Stunde " + lesson1_number + " wurde zur Stunde" + lesson_number +
" umverlegt": "Die Stunde " + lesson1_number + " bei " + ", ".join([teacher.long_name for teacher in lesson.teachers]) + " wurde zur Stunde " + lesson_number + " umverlegt"}
all_telegram_messages[lesson_number] = "Die Stunde " + lesson1_number + " bei " + ", ".join([teacher.long_name for teacher in lesson.teachers]) + " wurde zur Stunde " + lesson_number + " umverlegt"
all_ignored_lessons[lesson_number] = lesson.teachers
all_ignored_lessons[lesson1_number] = lesson.teachers
elif lesson.code == "cancelled":
embed = discord.Embed(title=embed_title, color=discord.Color.from_rgb(255, 0, 0))
if lesson_number not in already_cancelled_lessons.keys() and lesson_number not in all_ignored_lessons.keys():
already_cancelled_lessons[lesson_number] = lesson.teachers
if len(lesson_number) == 1:
all_embed_fields[lesson_number] = {"Ausfall " + str(lesson_number) + ". Stunde (" + lesson.start.time().strftime("%H:%M") + " Uhr - " +
lesson.end.time().strftime("%H:%M") + " Uhr)": "Ausfall bei " + ", ".join([teacher.long_name for teacher in lesson.teachers]) + " in " +
", ".join([subject.long_name for subject in lesson.subjects])}
all_telegram_messages[lesson_number] = "Ausfall am " + weekday_in_german + ", den " + date_to_check.strftime("%d.%m.%Y") + " in der " + lesson_number + " Stunde bei " +\
", ".join([teacher.long_name for teacher in lesson.teachers]) + " in " + ", ".join([subject.long_name for subject in lesson.subjects]) + "\n\n"
else:
all_embed_fields[lesson_number] = {"Ausfall " + lesson_number: "Ausfall bei " + ", ".join([teacher.long_name for teacher in lesson.teachers]) + " in " + ", ".join([subject.long_name for subject in lesson.subjects])}
all_telegram_messages[lesson_number] = "Ausfall " + lesson_number + " am " + weekday_in_german + ", den " + date_to_check.strftime("%d.%m.%Y") + " bei " +\
", ".join([teacher.long_name for teacher in lesson.teachers]) + " in " + ", ".join([subject.long_name for subject in lesson.subjects]) + "\n\n"
elif lesson_number in already_cancelled_lessons.keys():
embed = discord.Embed(title=embed_title, color=discord.Color.from_rgb(77, 255, 77))
if lesson.teachers in already_cancelled_lessons.values():
del already_cancelled_lessons[lesson_number]
if len(lesson_number) == 1:
all_embed_fields[lesson_number] = {"KEIN Ausfall " + str(lesson_number) + ". Stunde (" + lesson.start.time().strftime("%H:%M") + " Uhr - " +
lesson.end.time().strftime("%H:%M") + " Uhr)": "KEIN Ausfall bei " + ", ".join([teacher.long_name for teacher in lesson.teachers]) + " in " +
", ".join([subject.long_name for subject in lesson.subjects])}
all_telegram_messages[lesson_number] = "KEIN Ausfall am " + weekday_in_german + ", den " + date_to_check.strftime("%d.%m.%Y") + " in der " + lesson_number + " Stunde bei " + \
", ".join([teacher.long_name for teacher in lesson.teachers]) + " in " + ", ".join([subject.long_name for subject in lesson.subjects]) + "\n\n"
else:
all_embed_fields[lesson_number] = {"KEIN Ausfall " + lesson_number: "KEIN Ausfall bei " + ", ".join([teacher.long_name for teacher in lesson.teachers]) + " in " +
", ".join([subject.long_name for subject in lesson.subjects])}
all_telegram_messages[lesson_number] = "KEIN Ausfall " + lesson_number + " am " + weekday_in_german + ", den " + date_to_check.strftime("%d.%m.%Y") + " bei " +\
", ".join([teacher.long_name for teacher in lesson.teachers]) + " in " + ", ".join([subject.long_name for subject in lesson.subjects]) + "\n\n"
if date_to_check.weekday() == 3:
self.all_cancelled_lessons_thursday = already_cancelled_lessons
self.all_ignored_lessons_thursday = all_ignored_lessons
elif date_to_check.weekday() == 4:
self.all_cancelled_lessons_friday = already_cancelled_lessons
self.all_ignored_lessons_friday = all_ignored_lessons
if len(all_telegram_messages) != 0 and len(all_embed_fields) != 0:
for number, content in all_embed_fields.items():
embed.add_field(name=list(content.keys())[0], value=list(content.values())[0])
telegram_message += all_telegram_messages[number]
await self.discord_channel.send(embed=embed)
await self.telegram_bot.send_message(self.telegram_group_id, telegram_message)
logging.info("Send message(s) (content from telegram message): " + telegram_message.replace("\n\n", "\n"))
except Exception:
logging.warning("An unexpected error occured, while trying to check the schedule\n" + format_exc())
await self.discord_channel.send("Ein Fehler trat auf, während der Stundenplan auf Veränderungen überprüft wurde. Siehe Logs für Details")
await self.telegram_bot.send_message(self.telegram_group_id, "Ein Fehler trat auf, während der Stundenplan auf veränderungen überprüft wurde. Siehe Logs für Details")
async def main(self, check_time: int = 60 * 60) -> None:
"""Überprüft nach einer gewissen Zeit immer wieder, ob veraltete Infos exestieren"""
try:
self.session = Session(server="asopo.webuntis.com",
username=argv[3],
password=argv[4],
school="Konrad-Zuse-schule",
useragent="")
try:
self.session.login()
except Exception as e:
logging.warning("A login error occurred (" + "\n".join([arg for arg in e.args]) + ")")
await self.discord_channel.send("Ein (Web)Untis Loginfehler ist aufgetreten. Siehe Logs für Details")
await self.telegram_bot.send_message(self.telegram_group_id, "Ein (Web)Untis Loginfehler ist aufgetrten. Siehe Logs für Details")
except IndexError:
logging.warning("No username and / or password for webuntis is / are given")
await self.discord_channel.send("Ein (Web)Untis Loginfehler ist aufgetreten. Siehe Logs für Details")
await self.telegram_bot.send_message(self.telegram_group_id, "Ein (Web)Untis Loginfehler ist aufgetrten. Siehe Logs für Details")
except Exception:
logging.warning("An exception for the webuntis session occurred:\n" + format_exc())
await self.discord_channel.send("Ein (Web)Untis Loginfehler ist aufgetreten. Siehe Logs für Details")
await self.telegram_bot.send_message(self.telegram_group_id, "Ein (Web)Untis Loginfehler ist aufgetrten. Siehe Logs für Details")
while True:
if self.session is not None:
today = date.today()
today_weekday = today.weekday()
if today_weekday == 3:  # Thursday
await self.__check_and_send_cancelled_lessons(today + timedelta(days=1))
if datetime.now().hour > 12:  # after 12 o'clock, start looking for cancellations in the following week
if self.which_thursday < today:
self.all_cancelled_lessons_thursday = {}
self.all_ignored_lessons_thursday = {}
self.which_thursday = today  # remember the reset so it happens only once per Thursday
await self.__check_and_send_cancelled_lessons(today + timedelta(days=7))
else:
await self.__check_and_send_cancelled_lessons(today + timedelta(days=7))
else:
await self.__check_and_send_cancelled_lessons(today)
elif today_weekday == 4:  # Friday
await self.__check_and_send_cancelled_lessons(today + timedelta(days=6))
if datetime.now().hour > 12:  # after 12 o'clock, start looking for cancellations in the following week
if self.which_friday < today:
self.all_cancelled_lessons_friday = {}
self.all_ignored_lessons_friday = {}
self.which_friday = today  # remember the reset so it happens only once per Friday
await self.__check_and_send_cancelled_lessons(today + timedelta(days=7))
else:
await self.__check_and_send_cancelled_lessons(today + timedelta(days=7))
else:
await self.__check_and_send_cancelled_lessons(today)
else:
for day in range(1, 6):
new_day = today + timedelta(days=day)
if new_day.weekday() in [3, 4]:
await self.__check_and_send_cancelled_lessons(new_day)
try:
infos = Infos()
today = datetime.today()
for child in infos.root:
child_date = child.tag[1:].split("-")
for index, x in enumerate(child_date):
if x.startswith("0"):
child_date[index] = x[1:]
if today > datetime(int(child_date[2]), int(child_date[1]), int(child_date[0])) + timedelta(days=1):  # adding a timedelta avoids invalid day numbers at month ends
infos.delete(child.tag)
logging.info("Removed informations for day " + child.tag)
logging.info("Checked for old informations")
except Exception:
logging.warning("An unexpected error occured, while trying to check the infos\n" + format_exc())
await self.discord_channel.send("Ein Fehler trat auf, während die Infos Datei auf alte Daten überprüft wurde. Siehe Logs für Details")
await self.telegram_bot.send_message(self.telegram_group_id, "Ein Fehler trat auf, während die Infos Datei auf alte Daten überprüft wurde. Siehe Logs für Details")
await asyncio.sleep(check_time)  # sleep for the given time, then check again whether the schedule changed / whether infos can be deleted
if __name__ == '__main__':
schedule_and_more_bot = ScheduleAnMoreBot()
schedule_and_more_bot.run(argv[1])
import requests
import reconcile.utils.threaded as threaded
import reconcile.queries as queries
from reconcile.dashdotdb_base import DashdotdbBase, LOG
QONTRACT_INTEGRATION = 'dashdotdb-dvo'
class DashdotdbDVO(DashdotdbBase):
def __init__(self, dry_run, thread_pool_size):
super().__init__(dry_run, thread_pool_size, "DDDB_DVO:",
'deploymentvalidation')
self.chunksize = self.secret_content.get('chunksize') or '20'
@staticmethod
def _chunkify(data, size):
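# Yield consecutive slices of `data` with `size` elements each; the last slice may be shorter.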
for i in range(0, len(data), int(size)):
yield data[i:i+int(size)]
def _post(self, deploymentvalidation):
if deploymentvalidation is None:
return
cluster = deploymentvalidation['cluster']
# deploymentvalidation['data']['data']['result'] -> [{metric, values}, ...]
dvresult = deploymentvalidation.get('data').get('data').get('result')
if dvresult is None:
return
LOG.info('%s Processing (%s) metrics for: %s', self.logmarker,
len(dvresult),
cluster)
if not self.chunksize:
self.chunksize = len(dvresult)
if len(dvresult) <= int(self.chunksize):
metrics = dvresult
else:
metrics = list(self._chunkify(dvresult, self.chunksize))
LOG.info('%s Chunked metrics into (%s) elements for: %s',
self.logmarker,
len(metrics),
cluster)
# keep everything but metrics from prom blob
deploymentvalidation['data']['data']['result'] = []
response = None
for metric_chunk in metrics:
# to keep future-prom-format compatible,
# keeping entire prom blob but iterating on metrics by
# self.chunksize max metrics in one post
dvdata = deploymentvalidation['data']
# if metric_chunk isn't already a list, make it one
if isinstance(metric_chunk, list):
dvdata['data']['result'] = metric_chunk
else:
dvdata['data']['result'] = [metric_chunk]
if not self.dry_run:
endpoint = (f'{self.dashdotdb_url}/api/v1/'
f'deploymentvalidation/{cluster}')
response = self._do_post(endpoint, dvdata, (5, 120))
try:
response.raise_for_status()
except requests.exceptions.RequestException as details:
LOG.error('%s error posting DVO data (%s): %s',
self.logmarker, cluster, details)
LOG.info('%s DVO data for %s synced to DDDB',
self.logmarker, cluster)
return response
def _get_deploymentvalidation(self, validation, clusterinfo):
cluster, promurl, ssl_verify, promtoken = self._get_prometheus_info(
clusterinfo)
LOG.debug('%s processing %s, %s',
self.logmarker, cluster, validation)
try:
deploymentvalidation = self._promget(url=promurl,
params={
'query': (validation)},
token=promtoken,
ssl_verify=ssl_verify)
except requests.exceptions.RequestException as details:
LOG.error('%s error accessing prometheus (%s): %s',
self.logmarker, cluster, details)
return None
return {'cluster': cluster,
'data': deploymentvalidation}
# query the prometheus instance on a cluster and retrieve all the metric
# names. If a filter is provided, use that to filter the metric names
# via startswith and return only those that match.
# Returns a map of {cluster: cluster_name, data: [metric_names]}
def _get_validation_names(self, clusterinfo, filter=None):
cluster, promurl, ssl_verify, promtoken = self._get_prometheus_info(
clusterinfo)
LOG.debug('%s retrieving validation names for %s, filter %s',
self.logmarker, cluster, filter)
try:
uri = '/api/v1/label/__name__/values'
deploymentvalidation = self._promget(url=promurl,
params={},
token=promtoken,
ssl_verify=ssl_verify,
uri=uri)
except requests.exceptions.RequestException as details:
LOG.error('%s error accessing prometheus (%s): %s',
self.logmarker, cluster, details)
return None
if filter:
deploymentvalidation['data'] = [
n for n in deploymentvalidation['data']
if n.startswith(filter)
]
return {'cluster': cluster,
'data': deploymentvalidation['data']}
def _get_prometheus_info(self, clusterinfo):
cluster_name = clusterinfo['name']
url = clusterinfo['prometheus']
ssl_verify = not clusterinfo['private']
token = self._get_automationtoken(clusterinfo['tokenpath'])
return cluster_name, url, ssl_verify, token
@staticmethod
def _get_clusters(cnfilter=None):
# 'cluster': 'fooname'
# 'private': False
# 'prometheus': 'https://prometheus.baz.tld'
# 'tokenpath':
# 'path': 'app-sre/creds/kubeube-configs/barpath'
# 'field': 'token', 'format': None}
results = []
clusters = queries.get_clusters(minimal=True)
for i in clusters or []:
if i.get('ocm') is not None and i.get('prometheusUrl') is not None:
results.append({
"name": i['name'],
"tokenpath": i['automationToken'],
"private": i['spec']['private'] or False,
"prometheus": i['prometheusUrl']
})
if cnfilter:
return [result for result in results if result['name'] == cnfilter]
return results
def run(self, cname=None):
validation_list = threaded.run(func=self._get_validation_names,
iterable=self._get_clusters(cname),
thread_pool_size=self.thread_pool_size,
filter='deployment_validation_operator')
validation_names = {}
if validation_list:
validation_names = {v['cluster']: v['data']
for v in validation_list if v}
clusters = self._get_clusters(cname)
self._get_token()
for cluster in clusters:
cluster_name = cluster['name']
if cluster_name not in validation_names:
LOG.debug('%s Skipping cluster: %s',
self.logmarker, cluster_name)
continue
LOG.debug('%s Processing cluster: %s',
self.logmarker, cluster_name)
validations = threaded.run(func=self._get_deploymentvalidation,
iterable=validation_names[cluster_name],
thread_pool_size=self.thread_pool_size,
clusterinfo=cluster)
threaded.run(func=self._post, iterable=validations,
thread_pool_size=self.thread_pool_size)
self._close_token()
def run(dry_run=False, thread_pool_size=10, cluster_name=None):
dashdotdb_dvo = DashdotdbDVO(dry_run, thread_pool_size)
dashdotdb_dvo.run(cluster_name)
#coding=utf-8
#
# Copyright (C) 2015 24Hours TECH Co., Ltd. All rights reserved.
# Created on Mar 21, 2014, by Junn
#
#
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
import settings
from managers import CustomUserManager
from core.models import BaseModel
from django.core.cache import cache
from django.contrib.auth import login
GENDER_CHOICES = (
('M', u'Male'),
('F', u'Female'),
('U', u'Unknown'),
)
ACCT_TYPE_CHOICES = (
('E', u'显式注册'), # explicit registration via the normal flow
('I', u'邀请注册'), # implicit registration via an invitation
('O', u'第3方登录注册') # registration via a third-party login
)
VALID_ATTRS = ('phone', 'nickname', 'gender', 'birth', 'email')
def mk_key(id):
return 'u%s' % id
# Completion flags for the individual user-profile fields
DEFAULT_PDU = '100000'
PDU_ITEMS = {
'phone': 0,
'avatar': 1,
'nickname': 2,
'innername': 3,
'birth': 4,
'gender': 5
}
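# e.g. a pdu of '110000' would mean phone and avatar are filled in while the remaining fields are not (illustrative example)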
class User(AbstractBaseUser, PermissionsMixin):
username = models.CharField(u'用户名', max_length=255, unique=True)
phone = models.CharField(u'手机号', max_length=18, blank=True, null=True, default='')
email = models.EmailField('Email', blank=True, null=True, default='')
is_staff = models.BooleanField(_('staff status'), default=False)
is_active = models.BooleanField(_('active'), default=True)
date_joined = models.DateTimeField(u'注册时间', auto_now_add=True)
acct_type = models.CharField(u'账号类型', max_length=1, choices=ACCT_TYPE_CHOICES, default='E')
nickname = models.CharField(u'昵称', max_length=32, null=True, blank=True, default='')
gender = models.CharField(u'性别', max_length=1, choices=GENDER_CHOICES, default='U')
# This field stores only the file name (without the path); the large and small images share the same name and are distinguished by different paths
avatar = models.CharField(u'头像', max_length=80, blank=True, null=True, default=settings.USER_DEFAULT_AVATAR)
birth = models.DateField(u'生日', null=True, blank=True, auto_now_add=True)
# Profile completion flags: a 0 bit means the field is not filled in, a 1 bit means it is
# Positions from left to right: phone, avatar, nickname, innername, birth, gender
pdu = models.CharField(max_length=10, default=DEFAULT_PDU)
login_count = models.IntegerField(u'登录次数', default=0)
last_login_ip = models.IPAddressField(u'最后登录IP', null=True, blank=True)
USERNAME_FIELD = 'username'
backend = 'django.contrib.auth.backends.ModelBackend'  # this backend is required when the user logs in
objects = CustomUserManager()
def __unicode__(self):
return self.nickname if self.nickname else self.username
class Meta:
verbose_name = u'用户'
verbose_name_plural = u'用户'
app_label = 'users'
swappable = 'AUTH_USER_MODEL'
def __init__(self, *args, **kwargs):
super(User, self).__init__(*args, **kwargs)
def update(self, data, new_avatar=None):
return self
def cache(self):
cache.set(mk_key(self.id), self, timeout=0)  # timeout=0 means the cache entry never expires
def clear_cache(self):
cache.delete(mk_key(self.id)) #TODO: maybe put this into baseModel
def save_avatar(self, avatar_file):
pass
def get_avatar_path(self):  # returns the full path of the avatar
if not self.avatar:
return ''
return '%s%s/%s' % (settings.MEDIA_URL, settings.USER_AVATAR_DIR['thumb'], self.avatar)
def post_login(self, req):
"""登录及后续其他处理.
:param req: django request请求对象"""
login(req, self)
if 'HTTP_X_FORWARDED_FOR' in req.META.keys():
self.last_login_ip = req.META['HTTP_X_FORWARDED_FOR']
else:
self.last_login_ip = req.META['REMOTE_ADDR']
self.incr_login_count()  # login count +1
self.save()
self.cache()
def is_invited_first_login(self):
'''Whether this is the first phone-number login of a user who was registered via an invitation'''
return not self.is_active and self.is_invited_signup()
# def save_thumb(self, thumb_size):
# if not self.avatar:
# return
#
# DJANGO_TYPE = self.avatar.file.content_type
#
# image = Image.open(StringIO(self.avatar.read()))
# image.thumbnail(thumb_size, Image.ANTIALIAS)
#
# # save the thumbnail to memory
# temp_handle = StringIO()
# image.save(temp_handle, 'png')
# temp_handle.seek(0) # rewind the file
#
# # save to the thumbnail field
# suf = SimpleUploadedFile(os.path.split(self.avatar.name)[-1], temp_handle.read(), content_type=DJANGO_TYPE)
# self.thumb.save(self.avatar.name, suf, save=False)
def is_invited_signup(self):
return self.acct_type == 'I'
def get_short_name(self):
return self.nickname if self.nickname else self.username
def get_username(self):
return self.username
def get_full_name(self):
return self.username
def get_bound_user(self):
if self.bound_uid:
return User.objects.get(id=self.bound_uid)
return self
def update_pdu(self, index):
'''Update the profile completeness.
Once a profile item is filled in, its completion bit is set to 1 (it is 0 while unset); index is the position, counted from 0.
'''
if self.pdu[index] == '1':
return
ps = list(self.pdu)
ps[index] = '1'
self.pdu = ''.join(ps)
############################################################
def incr_login_count(self):
'''Increment the login count by 1'''
self.login_count += 1
def is_invited_signup_passwd_set_required(self):
return self.is_invited_signup() and not self.is_active
class Profile(BaseModel):
user = models.ForeignKey('users.User', verbose_name=u'用户')
#city = models.CharField(u'城市', max_length=20, null=True)
address = models.CharField(u'地址', max_length=50, null=True)
def __unicode__(self):
return unicode(self.id)  # __unicode__ must return a string, not an int
class Meta:
verbose_name = u'用户详情'
verbose_name_plural = u'用户详情'
class PasswordResetRecord(BaseModel):
user = models.ForeignKey(User, verbose_name=u'用户')
key = models.CharField(u'重置密码验证码', max_length=100)
is_valid = models.BooleanField(u'是否可用', default=True)
def __unicode__(self):
return "%s, %s, %s" % (self.user, self.key, self.is_valid)
class Meta:
verbose_name = u'重置密码的验证码'
verbose_name_plural = u'重置密码的验证码'
class MobileBindingRecord(BaseModel):
user = models.ForeignKey(User, verbose_name=u'用户')
mobile = models.CharField(u'电话号码', max_length=16)
bound = models.BooleanField(u'是否绑定', default=True)
def __unicode__(self):
return "%s, %s" % (self.user, self.mobile)
class Meta:
verbose_name = u'手机绑定记录'
verbose_name_plural = u'手机绑定记录'
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This module opens a MIDI input and receives MIDI signals from a user-selected port.
"""
Created on Sun Jun 7 10:34:59 2020
@author: johntimothysummers
"""
import mido
from harmony_state import harmony_state
from collections import deque
import time
import numpy as np
import pt_utils
import pt_live_graph
class midi_note_pitchclass_collector():
def __init__(self):
# instance state; as class attributes these would be shared between all instances
self.pclass_count = np.zeros(12, dtype=int)
self.current_notegroup = 0
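# pclass_count[i] counts the currently sounding notes whose pitch class is i (midi_note % 12); current_notegroup packs those counts into the binary notegroup encoding provided by pt_utils.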
def add_note(self, midi_note):
self.pclass_count[midi_note % 12] += 1
self.current_notegroup = pt_utils.numpy_array_to_binary_notegroup(self.pclass_count)
def remove_note(self, midi_note):
if (self.pclass_count[midi_note % 12] > 0):
self.pclass_count[midi_note % 12] -= 1
self.current_notegroup = pt_utils.numpy_array_to_binary_notegroup(self.pclass_count)
def play_current_kpdve(outport, current_state):
for e in current_state.current_kpdve_notes():
simple_midi_note(outport, e)
def play_root(outport, current_state):
simple_midi_note(outport, current_state.root_note(), 1)
def simple_midi_note(outport, note_num, channel=0):
msg = mido.Message('note_on', note=note_num, channel=channel)
#msg_off = mido.Message('note_off', note=note_num, channel=channel)
outport.send(msg)
#outport.send(msg_off)
def ask_in_out_ports():
'''
Prompt the user to choose MIDI input and output ports from the available lists.
Returns
-------
inport, outport tuple
'''
ins = mido.get_input_names()
max_ins = len(ins)-1
print(ins)
in_idx = input(f'choose input from list: (0...{max_ins}) ')
in_idx = int(in_idx) % (max_ins + 1)
inport = mido.open_input(ins[in_idx])
outs = mido.get_output_names()
max_outs = len(outs)-1
print(outs)
out_idx = input(f'choose output from list: (0...{max_outs}) ')
out_idx = int(out_idx) % (max_outs + 1)
outport = mido.open_output(outs[out_idx])
return inport, outport
def analyze_midi_piano_input():
inport, outport = ask_in_out_ports()
p_classes = midi_note_pitchclass_collector()
current_state = harmony_state()
graph_window = pt_live_graph.live_harmony_graph(current_state)
msglog = deque()
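# msglog queues incoming messages with a "due" timestamp so they can be echoed to the output port once due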
while True:
msg = inport.receive()
change_harmony = False
print(msg)  # log the raw incoming message for inspection
if (msg.type == "note_on"):
if msg.velocity > 0:
p_classes.add_note(msg.note)
else:
p_classes.remove_note(msg.note)
print(p_classes.pclass_count)
change_harmony = current_state.change_notegroup(p_classes.current_notegroup)
msglog.append({"msg": msg, "due": time.time()})
print(current_state.current_root_string() + " as " + current_state.current_function_string() + " of " + current_state.current_conv_tonic_string() + " " + current_state.current_conv_pattern_string())
elif (msg.type == "note_off"):
p_classes.remove_note(msg.note)
print(p_classes.pclass_count)
change_harmony = current_state.change_notegroup(p_classes.current_notegroup)
elif (msg.type == "control_change"):
if (msg.control == 1): # joystick:1
if(msg.value == 0):
change_harmony = current_state.param_increment(1, 1)
elif (msg.value == 127):
change_harmony = current_state.param_increment(1, -1)
elif (msg.type == "pitchwheel"):
if msg.pitch == -8192:
change_harmony = current_state.param_increment(2, -1)
elif msg.pitch == 8191:
change_harmony = current_state.param_increment(2, 1)
while len(msglog) > 0 and msglog[0]["due"] <= time.time():
outport.send(msglog.popleft()["msg"])
if change_harmony:
graph_window.update_window_for_state()
time.sleep(0.001)
if __name__ == "__main__":
analyze_midi_piano_input()
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class AppleJackSpider(BaseSpider):
name = 'applejack.com'
allowed_domains = ['www.applejack.com', 'applejack.com']
start_urls = ()
def __init__(self, *args, **kwargs):
super(AppleJackSpider, self).__init__(*args, **kwargs)
self.skus = set()
with open(os.path.join(HERE, 'applejack_skus.csv'), 'rb') as f:
reader = csv.reader(f)
reader.next()
for row in reader:
self.skus.add(row[0])
def start_requests(self):
search_url = u'http://www.applejack.com/search/?criteria=%s&product_category=wine%%2Cspirits%%2Cbeer%%2Ccordials_liqueurs&x=0&y=0'
for sku in self.skus:
yield Request(search_url % sku, dont_filter=True, meta={'sku': sku})
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# pagination
# next_page = hxs.select(u'//a[@title="Next Page"]/@href').extract()
# if next_page:
# next_page = urljoin_rfc(get_base_url(response), next_page[0])
# yield Request(next_page, meta=response.meta)
# products
products = hxs.select(u'//div[@class="productcatalog-search-result"]/h4/a/@href').extract()
for url in products:
url = urljoin_rfc(get_base_url(response), url)
yield Request(url, meta=response.meta, dont_filter=True, callback=self.parse_product)
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
loader = ProductLoader(item=Product(), response=response)
loader.add_value('url', response.url)
loader.add_value('sku', re.search(r'product/(\d+)', response.url).groups())
name = hxs.select(u'//h1[@class="pagetitle"]/text()').extract()[0].strip()
bottle_size = hxs.select(u'//div[child::strong[contains(text(), "Bottle Size") or contains(text(), "Size of Bottle")]]/span/text()')
if not bottle_size:
bottle_size = hxs.select(u'//div[contains(text(),"Size of Bottle")]/span/text()')
name += ' ' + bottle_size.extract()[0].strip()
loader.add_value('name', name)
loader.add_xpath('price', u'//div[@class="cardPrice"]/text()')
if not loader.get_output_value('price'):
loader.add_xpath('price', u'//div[@class="salePrice"]/text()')
if not loader.get_output_value('price'):
loader.add_xpath('price', u'//div[@class="regularPrice"]/text()')
if not loader.get_output_value('price'):
loader.add_xpath('price', u'//div[@class="regularprice"]/text()')
site_sku = hxs.select(u'//span[@class="itemnumber"]/text()').re(u'- (.*)')[0].strip()
search_sku = response.meta['sku'].strip()
if site_sku == search_sku:
yield loader.load_item()
# Generated by Django 3.0.7 on 2020-09-03 13:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Job', '0005_auto_20200903_0602'),
]
operations = [
migrations.AddField(
model_name='certificates',
name='image',
field=models.ImageField(blank=True, upload_to='image/'),
),
migrations.AddField(
model_name='job',
name='image',
field=models.ImageField(blank=True, upload_to='image/'),
),
migrations.AddField(
model_name='project',
name='image',
field=models.ImageField(blank=True, upload_to='image/'),
),
migrations.AddField(
model_name='volunteer',
name='image',
field=models.ImageField(blank=True, upload_to='image/'),
),
]
"""
This is meant for loading the definitions from an external file.
"""
import os.path
from .backend import EmptyBackend
from .driver import Driver
from .errors import CompilerError
from .lexer import Lexer
from . import symbols
from . import types
# Since a file isn't going to change in the middle of our run, there's no
# point in processing it more than once
IMPORT_CACHE = {}
class RequireProcessor(EmptyBackend):
"""
This is a limited kind of backend, which only stores types which are
defined in other files.
"""
@staticmethod
def require(filename, backend):
"""
Returns a RequireProcessor which has processed the given filename,
or None if this import has already been processed.
"""
abs_filename = os.path.abspath(filename)
if abs_filename in IMPORT_CACHE:
return IMPORT_CACHE[abs_filename]
# This has to be set to None, so that circular imports are avoided. They
# shouldn't happen anyway, but this makes for an easy additional level
# of safety
IMPORT_CACHE[abs_filename] = None
with open(filename) as require_stream:
req_processor = RequireProcessor(filename, backend)
lex = Lexer(require_stream, filename)
drv = Driver(lex, req_processor)
drv.compile()
IMPORT_CACHE[abs_filename] = req_processor
return req_processor
def __init__(self, filename, real_backend):
self.real_backend = real_backend
self.in_function = False
self.import_list = set()
self.exported_values = set()
self.exported_types = set()
self.file_namespace = None
self.context = symbols.Context()
self.filename = filename
self.line = 0
self.col = 0
def _value_is_defined(self, name):
"""
Returns True if the given variable is defined in the current scope, or
False otherwise.
This is for the static expression processor function, var-def?
"""
return (name in self.context.values and
self.context.values.is_visible(name))
def _type_is_defined(self, name):
"""
Returns True if the given type is defined in the current scope, or
False otherwise.
This is for the static expression processor function, type-def?
"""
return (name in self.context.types and
self.context.types.is_visible(name))
def _platform(self):
"""
Returns the (OS, architecture) pair of the underlying backend.
"""
return self.real_backend._platform()
def update_position(self, line, col):
"""
Updates the processor with the current location in the input file.
"""
self.line = line
self.col = col
def _register_require(self, filename):
"""
Registers that the given file has been required. Raises a ValueError
if the filename has already been imported.
"""
abs_filename = os.path.abspath(filename)
if abs_filename in self.import_list:
raise ValueError('Circular import')
self.import_list.add(abs_filename)
return abs_filename
def _write_comment(self, comment, *args, **kwargs):
"""
Passes a comment back to the backend. Needed for static conditionals.
"""
self.real_backend._write_comment(comment, *args, **kwargs)
def handle_func_def_start(self, *_):
"""
Ignore any definitions restricted to functions.
"""
self.in_function = True
def handle_func_def_end(self):
"""
Stop ignoring the next declaration block.
"""
self.in_function = False
def handle_namespace(self, namespace):
"""
Sets the current namespace, if one is not defined.
"""
if self.file_namespace is not None:
raise CompilerError(self.filename, self.line, self.col,
"Namespace already assigned")
self.file_namespace = namespace
self.context = self.context.register(namespace)
def handle_require(self, filename):
"""
This invokes itself recursively, as long as the require would not be
circular.
"""
if self.file_namespace is None:
raise CompilerError(self.filename, self.line, self.col,
"Must define a file namespace before executing a require")
try:
filename = self._register_require(filename)
except ValueError:
raise CompilerError(self.filename, self.line, self.col,
"Circular require detected: '{}'", filename)
try:
req_processor = RequireProcessor.require(filename, self.real_backend)
if req_processor is None:
return
for val_name in req_processor.exported_values:
self.context.values.meta_get(val_name, 'visible').add(self.file_namespace)
for type_name in req_processor.exported_types:
self.context.types.meta_get(type_name, 'visible').add(self.file_namespace)
except OSError:
raise CompilerError(self.filename, self.line, self.col,
"Could not open file '{}' for reading", filename)
def handle_decl(self, name, decl_type):
"""
Records the declaration in the external store.
"""
if self.in_function:
return
if self.file_namespace is None:
raise CompilerError(self.filename, self.line, self.col,
"Must define a file namespace before executing a declare")
was_type_name = isinstance(decl_type, types.TypeName)
decl_type = types.resolve_name(decl_type, self.context.types)
if isinstance(decl_type, types.StringLiteral):
self.context.values[name] = types.PointerTo(types.Byte)
self.context.values.meta_set(name, 'visible', {self.file_namespace})
self.context.values.meta_set(name, 'array', True)
self.context.values.meta_set(name, 'global', True)
elif was_type_name or isinstance(decl_type, types.RAW_TYPES):
was_array = isinstance(decl_type, types.ArrayOf)
self.context.values[name] = types.decay_if_array(decl_type)
self.context.values.meta_set(name, 'visible', {self.file_namespace})
self.context.values.meta_set(name, 'global', True)
if was_array:
self.context.values.meta_set(name, 'array', True)
elif isinstance(decl_type, types.Struct):
self.context.types[name] = decl_type
self.context.types.meta_set(name, 'visible', {self.file_namespace})
elif isinstance(decl_type, types.FunctionDecl):
full_decl_type = symbols.namespace_func_decl(
decl_type,
self.file_namespace)
self.context.values[name] = full_decl_type
self.context.values.meta_set(name, 'visible', {self.file_namespace})
self.context.values.meta_set(name, 'global', True)
elif isinstance(decl_type, types.AliasDef):
self.context.types[name] = decl_type
self.context.types.meta_set(name, 'visible', {self.file_namespace})
def handle_exports(self, names):
"""
Moves the exported names into the export list, so that they are
visible to the main backend.
"""
def check_non_foreign(name, context):
"""
Ensures that the given name doesn't resolve to an identifier
that belongs to a foreign namespace.
Allowing these to be re-exported would lead to 'origination
issues', since moving them from one namespace to another would
lose the original name. Since this is required for globals,
that would have to be stored somewhere, which complicates
things.
"""
namespace, _ = symbols.split_namespace(context.resolve(name))
if namespace != self.file_namespace:
raise CompilerError(self.filename, self.line, self.col,
'Cannot re-export foreign value or type "{}"', name)
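# Exported names carry a sigil: a leading ' marks a value, a leading * marks a type.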
for name in names:
if name[0] == "'":
name = name[1:]
check_non_foreign(name, self.context.values)
try:
type_obj = self.context.values[name]
except KeyError:
raise CompilerError(self.filename, self.line, self.col,
'Cannot export undefined value "{}"', name)
self.exported_values.add(self.context.values.resolve(name))
elif name[0] == '*':
name = name[1:]
check_non_foreign(name, self.context.types)
try:
type_decl = self.context.types[name]
except KeyError:
raise CompilerError(self.filename, self.line, self.col,
'Cannot export undefined type "{}"', name)
self.exported_types.add(self.context.types.resolve(name))
else:
raise CompilerError(self.filename, self.line, self.col,
"Exported name must be prefixed with ' or *")
from requests import Session
from uuid import uuid4
from base64 import b64encode
from hashlib import sha1
from datetime import datetime
from adobe_analytics.config import BASE_URL
from adobe_analytics.exceptions import ApiError
class OmnitureSession:
def __init__(self, username=None, secret=None, company=None,
api_version=None, proxies=None, timeout=None):
# Due to Adobe's API docs _telling_ you to use username:company
# as your username, accept both methods of input
if company:
self.username = '{}:{}'.format(username, company)
else:
self.username = username
self._secret = secret
self.timeout = timeout
self.session = Session()
# Ensure successful login
response = self.session.get(
BASE_URL,
params={'method':'Company.GetEndpoint'},
headers=self.generate_wsse_header()
)
response.raise_for_status()
r = response.json()
if 'error' in r:
raise ApiError(r)
else:
self.base_url = r
def generate_wsse_header(self):
# Adapted from Adobe's analytics-1.4-apis documentation
# docs/authentication/using_web_service_credentials.md
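# The WSSE password digest is Base64(SHA1(nonce + created + secret)); the nonce
# itself is sent Base64-encoded, and the created timestamp is sent in the clear.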
nonce = str(uuid4())
created = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S z')
sha = sha1((nonce + created + self._secret).encode())
digest = b64encode(sha.digest()).decode()
b64nonce = b64encode(nonce.encode()).decode()
header = 'UsernameToken Username="{username}", '\
'PasswordDigest="{digest}", '\
'Nonce="{nonce}", Created="{created}"'
header = header.format(
username=self.username,
digest=digest,
nonce=b64nonce,
created=created
)
return {'X-WSSE': header}
maximoImpar = int(input("Up to which number would you like to list the odd numbers?: "))
for x in range(maximoImpar + 1):  # +1 so the upper bound itself is included
if x % 2 != 0:
print(x)
#!/usr/bin/env python
"""
BA 08 NGA model
"""
from .utils import *
class BA08_nga:
"""
Class of NGA model of Boore and Atkinson 2008
"""
def __init__(self):
"""
Model initialization
"""
# 0. Given parameters (period independent parameters)
self.a1 = 0.03 # in gravity (g)
self.a2 = 0.09 # in gravity (g)
self.pgalow = 0.06 # in gravity (g)
self.V1 = 180. # in m/s
self.V2 = 300. # in m/s
self.Vref = 760. # in m/s
# 1. List of periods with defined coefficients (PGA is -1; PGV is -2)
self.periods = [ -2.0, -1.0, 0.01, 0.02, 0.03, 0.05, 0.075, 0.10, 0.15, 0.20, 0.25,
0.30, 0.40, 0.50, 0.75, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 7.5, 10.0 ]
# ===============================
# period-dependent coefficients
# ===============================
# 2. List of distance-scaling coefficients
c1s = [ -0.87370, -0.66050, -0.66220, -0.66600, -0.69010, -0.71700, -0.72050,
-0.70810, -0.69610, -0.58300, -0.57260, -0.55430, -0.64430, -0.69140,
-0.74080, -0.81830, -0.83030, -0.82850, -0.78440, -0.68540, -0.50960,
-0.37240, -0.09824 ]
c2s = [ 0.10060, 0.11970, 0.12000, 0.12280, 0.12830, 0.13170, 0.12370, 0.11170,
0.09884, 0.04273, 0.02977, 0.01955, 0.04394, 0.06080, 0.07518, 0.10270,
0.09793, 0.09432, 0.07282, 0.03758, -0.02391, -0.06568, -0.13800 ]
c3s = [ -0.00334, -0.01151, -0.01151, -0.01151, -0.01151, -0.01151, -0.01151,
-0.01151, -0.01113, -0.00952, -0.00837, -0.00750, -0.00626, -0.00540,
-0.00409, -0.00334, -0.00255, -0.00217, -0.00191, -0.00191, -0.00191,
-0.00191, -0.00191 ]
hs = [ 2.54, 1.35, 1.35, 1.35, 1.35, 1.35, 1.55, 1.68, 1.86, 1.98, 2.07, 2.14,
2.24, 2.32, 2.46, 2.54, 2.66, 2.73, 2.83, 2.89, 2.93, 3.00, 3.04 ] # in km
e1s = [ 5.00121, -0.53804, -0.52883, -0.52192, -0.45285, -0.28476, 0.00767,
0.20109, 0.46128, 0.57180, 0.51884, 0.43825, 0.39220, 0.18957, -0.21338,
-0.46896, -0.86271, -1.22652, -1.82979, -2.24656, -1.28408, -1.43145,
-2.15446 ]
e2s = [ 5.04727, -0.50350, -0.49429, -0.48508, -0.41831, -0.25022, 0.04912,
0.23102, 0.48661, 0.59253, 0.53496, 0.44516, 0.40602, 0.19878,
-0.19496, -0.43443, -0.79593, -1.15514, -1.74690, -2.15906, -1.21270,
-1.31632, -2.16137 ]
e3s = [ 4.63188, -0.75472, -0.74551, -0.73906, -0.66722, -0.48462, -0.20578,
0.03058, 0.30185, 0.4086, 0.3388, 0.25356, 0.21398, 0.00967, -0.49176,
-0.78465, -1.20902, -1.57697, -2.22584, -2.58228, -1.50904, -1.81022,
-2.53323 ]
e4s = [ 5.0821, -0.5097, -0.49966, -0.48895, -0.42229, -0.26092, 0.02706, 0.22193,
0.49328, 0.61472, 0.57747, 0.5199, 0.4608, 0.26337, -0.10813, -0.3933,
-0.88085, -1.27669, -1.91814, -2.38168, -1.41093, -1.59217, -2.14635 ]
e5s = [ 0.18322, 0.28805, 0.28897, 0.25144, 0.17976, 0.06369, 0.0117, 0.04697,
0.1799, 0.52729, 0.6088, 0.64472, 0.7861, 0.76837, 0.75179, 0.6788,
0.70689, 0.77989, 0.77966, 1.24961, 0.14271, 0.52407, 0.40387 ]
e6s = [ -0.12736, -0.10164, -0.10019, -0.11006, -0.12858, -0.15752, -0.17051,
-0.15948, -0.14539, -0.12964, -0.13843, -0.15694, -0.07843, -0.09054,
-0.14053, -0.18257, -0.2595, -0.29657, -0.45384, -0.35874, -0.39006,
-0.37578, -0.48492 ]
e7s = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00102, 0.08607, 0.10601, 0.02262, 0,
0.10302, 0.05393, 0.19082, 0.29888, 0.67466, 0.79508, 0, 0, 0 ]
Mhs = [ 8.5, 6.75, 6.75, 6.75, 6.75, 6.75, 6.75, 6.75, 6.75, 6.75, 6.75, 6.75,
6.75, 6.75, 6.75, 6.75, 6.75, 6.75, 6.75, 6.75, 8.5, 8.5, 8.5 ]
self.Mref = 4.5 # moment magnitude
self.Rref = 1.0 # in km
# 3. list of site-amplification coefficients (period-dependent)
blins = [ -0.6, -0.36, -0.36, -0.34, -0.33, -0.29, -0.23, -0.25, -0.28, -0.31,
-0.39, -0.44, -0.5, -0.6, -0.69, -0.7, -0.72, -0.73, -0.74, -0.75,
-0.75, -0.692, -0.65 ]
b1s = [ -0.5, -0.64, -0.64, -0.63, -0.62, -0.64, -0.64, -0.6, -0.53, -0.52,
-0.52, -0.52, -0.51, -0.5, -0.47, -0.44, -0.4, -0.38, -0.34, -0.31,
-0.291, -0.247, -0.215 ]
b2s = [ -0.06, -0.14, -0.14, -0.12, -0.11, -0.11, -0.11, -0.13, -0.18,
-0.19, -0.16, -0.14, -0.1, -0.06, 0 , 0, 0, 0, 0, 0, 0, 0, 0 ]
# 4. list of Aleatory uncertainties
# intra-event residual standard deviation
self.sigma0 = [ 0.5 , 0.502, 0.502, 0.502, 0.507, 0.516, 0.513, 0.52 ,
0.518, 0.523, 0.527, 0.546, 0.541, 0.555, 0.571, 0.573,
0.566, 0.58 , 0.566, 0.583, 0.601, 0.626, 0.645]
# inter-event residual standard deviation (when fault type is not specified)
self.tau_U = [ 0.286, 0.265, 0.267, 0.267, 0.276, 0.286, 0.322, 0.313,
0.288, 0.283, 0.267, 0.272, 0.267, 0.265, 0.311, 0.318,
0.382, 0.398, 0.41 , 0.394, 0.414, 0.465, 0.355]
# inter-event residual standard deviation (when fault type is specified)
self.tau_M = [ 0.256, 0.26 , 0.262, 0.262, 0.274, 0.286, 0.32 , 0.318,
0.29 , 0.288, 0.267, 0.269, 0.267, 0.265, 0.299, 0.302,
0.373, 0.389, 0.401, 0.385, 0.437, 0.477, 0.477]
self.sigma_TU = [ 0.576,0.566,0.569,0.569,0.578,0.589,0.606,0.608,
0.592,0.596,0.592,0.608,0.603,0.615,0.649,0.654,
0.684,0.702,0.7,0.702,0.73,0.781,0.735 ]
self.sigma_TM = [ 0.56, 0.564, 0.566, 0.566, 0.576, 0.589, 0.606, 0.608,
0.594, 0.596, 0.592, 0.608, 0.603, 0.615, 0.645, 0.647,
0.679, 0.7, 0.695, 0.698, 0.744, 0.787, 0.801 ]
# Old Coefs (period match)
self.Coefs = {}
for i in range(len(self.periods)):
T1 = self.periods[i]
Tkey = GetKey(T1)
self.Coefs[Tkey] = {}
self.Coefs[Tkey]['c1'] = c1s[i]
self.Coefs[Tkey]['c2'] = c2s[i]
self.Coefs[Tkey]['c3'] = c3s[i]
self.Coefs[Tkey]['h'] = hs[i]
self.Coefs[Tkey]['e1'] = e1s[i]
self.Coefs[Tkey]['e2'] = e2s[i]
self.Coefs[Tkey]['e3'] = e3s[i]
self.Coefs[Tkey]['e4'] = e4s[i]
self.Coefs[Tkey]['e5'] = e5s[i]
self.Coefs[Tkey]['e6'] = e6s[i]
self.Coefs[Tkey]['e7'] = e7s[i]
self.Coefs[Tkey]['Mh'] = Mhs[i]
self.Coefs[Tkey]['blin'] = blins[i]
self.Coefs[Tkey]['b1'] = b1s[i]
self.Coefs[Tkey]['b2'] = b2s[i]
self.CoefKeys = list(self.Coefs[list(self.Coefs.keys())[0]].keys())
self.fault = ['unspecified','strike-slip','normal','reverse','U','NM','SS','RV']
def __call__(self, M, Rjb, Vs30, T, rake, Mech=3, Ftype=None, AB11=None, CoefTerms={'terms': (1, 1, 1), 'NewCoefs': None}):
"""
Compute IM for single period
required inputs:
M, Rjb, Vs30, T
rake: rake angle (degree), default is None (Unspecified fault type)
or give Mech instead of rake
Mech:
0: strike
1: normal
2: reverse
else: unspecified (U=1) (Default)
Ftype = 'U', or 'SS', or 'RV', or 'NM'
AB11: consider the recent correction to the median value
"""
# ==================
# Input variables
# ==================
self.M = float(M) # moment magnitude
self.Rjb = float(Rjb) # Joyner-Boore distance (km)
self.Vs30 = float( Vs30 ) # 30 meter averaged S wave velocity (m/s)
terms = CoefTerms['terms']
NewCoefs = CoefTerms['NewCoefs']
if T in self.periods:
self.T = T
else:
            print('T is not in the periods list; interpolation is not implemented')
            raise ValueError
# check inputs
        if self.M is None or self.M < 0:
            print('Moment magnitude must be a positive number')
            raise ValueError
        if self.Rjb is None or self.Rjb < 0:
            print('Joyner-Boore distance must be a non-negative number')
            raise ValueError
        if self.Vs30 is None or self.Vs30 <= 0:
            print('Vs30 must be a positive number')
            raise ValueError
self.rake = rake
self.Mech = Mech
        if rake is None and Mech is None and Ftype is None:
            print('either rake or one of (U,SS,NM,RV) should be provided')
            raise ValueError
        else:
            if Ftype is not None:
                self.U = 1*(Ftype == 'U')
                self.SS = 1*(Ftype == 'SS')
                self.NM = 1*(Ftype == 'NM')
                self.RV = 1*(Ftype == 'RV')
            else:
                if Mech is not None and rake is not None:
                    # given Mech and rake at the same time, prefer Mech over rake
                    rake = None
                if rake is not None and Mech is None:
                    # get the fault type from the rake angle
                    self.rake = rake
                    self.ftype()
                if rake is None and Mech is not None:
                    self.U = 1*(Mech > 2)
                    self.SS = 1*(Mech == 0)
                    self.NM = 1*(Mech == 1)
                    self.RV = 1*(Mech == 2)
self.AB11 = AB11
# modify the coefficients
        if NewCoefs is not None:
# only update Coefs given by NewCoefs (at self.T)
Tkey = GetKey( self.T )
NewCoefKeys = list(NewCoefs.keys())
for key in NewCoefKeys:
self.Coefs[Tkey][key] = NewCoefs[key]
# ======================
# begin to compute IM
# ======================
IM = self.compute_im(terms=terms)
sigmaT, tau, sigma = self.compute_std()
return IM, sigmaT, tau, sigma
# ============================
# Functions used in the class
# they could also be output for
# further regression analysis
# ============================
def ftype(self):
"""
Fault-Type
"""
FT = rake2ftype_BA( self.rake )
if FT not in self.fault:
print('Invalid fault type!')
            print('It should be one of the following:')
print(self.fault)
raise ValueError
else:
if FT == 'unspecified' or FT == 'U':
self.U = 1
else:
self.U = 0
if FT == 'strike-slip' or FT == 'SS':
self.SS = 1
else:
self.SS = 0
if FT == 'normal' or FT == 'NM':
self.NM = 1
else:
self.NM = 0
if FT == 'reverse' or FT == 'RV':
self.RV = 1
else:
self.RV = 0
return FT
def moment_function(self, Tother=None):
"""
Magnitude-Moment scaling
"""
        if Tother is not None:
Ti = GetKey(Tother)
else:
Ti = GetKey(self.T)
e1 = self.Coefs[Ti]['e1']
e2 = self.Coefs[Ti]['e2']
e3 = self.Coefs[Ti]['e3']
e4 = self.Coefs[Ti]['e4']
e5 = self.Coefs[Ti]['e5']
e6 = self.Coefs[Ti]['e6']
e7 = self.Coefs[Ti]['e7']
Mh = self.Coefs[Ti]['Mh']
if self.M <= Mh:
return e1*self.U + e2*self.SS + e3*self.NM + e4*self.RV + \
e5*(self.M-Mh) + e6*(self.M-Mh)**2.
else:
return e1*self.U + e2*self.SS + e3*self.NM + e4*self.RV + \
e7*(self.M-Mh)
def distance_function(self,Tother=None):
"""
Distance function
Geometrical spreading? (yes ~ ln(R))
"""
        if Tother is not None:
Ti = GetKey(Tother)
else:
Ti = GetKey(self.T)
h = self.Coefs[Ti]['h']
c1 = self.Coefs[Ti]['c1']
c2 = self.Coefs[Ti]['c2']
c3 = self.Coefs[Ti]['c3']
R = np.sqrt( self.Rjb**2 + h**2 )
return (c1+c2*(self.M-self.Mref))*np.log(R/self.Rref)+c3*(R-self.Rref)
def soil_function(self, Vs30=None, Tother=None):
"""
Site Amplification Function
"""
        if Vs30 is not None:
            self.Vs30 = Vs30
        if Tother is not None:
            Ti = GetKey( Tother )
        else:
            Ti = GetKey( self.T )
# linear term
blin = self.Coefs[Ti]['blin']
flin = blin * np.log(self.Vs30/self.Vref)
# =================
# non-linear term
# =================
        # 1. compute pga4nl, which is defined as the median PGA when Vs30=Vref=760 m/s
Tpga = -1.0 # compute PGA
pga4nl = np.exp( self.moment_function(Tother=Tpga) + self.distance_function(Tother=Tpga) )
b1 = self.Coefs[Ti]['b1']
b2 = self.Coefs[Ti]['b2']
if self.Vs30 <= self.V1:
bnl = b1
elif self.Vs30 > self.V1 and self.Vs30 <= self.V2:
bnl = (b1-b2)*np.log(self.Vs30/self.V2) / np.log(self.V1/self.V2) + b2
elif self.Vs30 > self.V2 and self.Vs30 < self.Vref:
bnl = b2*np.log( self.Vs30/self.Vref) / np.log(self.V2/self.Vref)
else:
bnl = 0
# 2. compute smoothing constants
dx = np.log( self.a2/self.a1 )
dy = bnl*np.log(self.a2/self.pgalow)
c = (3*dy-bnl*dx)/(dx**2)
d = -(2*dy-bnl*dx)/(dx**3)
# 3. final equation for nonlinear term
if pga4nl <= self.a1:
fnl = bnl * np.log( self.pgalow/0.1 )
elif pga4nl > self.a1 and pga4nl <= self.a2:
term = c*(np.log(pga4nl/self.a1))**2 + d * (np.log(pga4nl/self.a1))**3
fnl = bnl * np.log( self.pgalow/0.1) + term
else:
fnl = bnl * np.log( pga4nl/0.1 )
return flin+fnl
def compute_im(self,terms=(1,1,1)):
"""
Compute IM based on functional form of BA08 model
"""
IM = np.exp(terms[0]*self.moment_function()+
terms[1]*self.distance_function()+
terms[2]*self.soil_function())
        if self.AB11 is None:
            return IM
        else:
            # Atkinson and Boore (2011) correction for small-to-moderate magnitudes
fba = max(0,3.888-0.674*self.M)-max(0,2.933-0.510*self.M)*np.log10(self.Rjb+10.)
fba = 10**fba
return fba * IM
def compute_std(self):
        if self.rake is None:
if self.U == 1:
FT = 'U'
if self.SS ==1:
FT = 'SS'
if self.NM == 1:
FT = 'NM'
if self.RV == 1:
FT = 'RV'
else:
FT = self.ftype()
try:
            ind = self.periods.index(self.T)
if FT == 'U':
return (self.sigma_TU[ind], self.tau_U[ind], self.sigma0[ind])
else:
return (self.sigma_TM[ind], self.tau_M[ind], self.sigma0[ind])
        except ValueError:
            print('input T not found in the available periods list; interpolation is not implemented')
            raise
def BA08nga_test(T,CoefTerms):
"""
Test BA features
"""
# input parameter list
    #Rjb = 200.
    Rjb = np.arange(1, 200, 5)
    #Vs30 = 748.0, 1200., 345., 160.
    Vs30 = 760.
Mw = 4.0
AB11 = None
rake = 0
Ftype = 'SS'
kwds = {'Mech':None,'Ftype':Ftype,'AB11':AB11,'CoefTerms':CoefTerms}
BAnga = BA08_nga() # BA08nga instance
values = mapfunc( BAnga, Mw, Rjb, Vs30, T, rake, **kwds )
for ivalue in range( len(values) ):
print(Rjb[ivalue], values[ivalue])
if __name__ == '__main__':
    #T = 10.0; NewCoefs = {'c1':-0.1,'c2':-0.14}         # use the updated coefficients
    #T = 10.0; NewCoefs = {'c1':-0.09824,'c2':-0.13800}  # use the updated coefficients
    #T = 10.0; NewCoefs = {'c1':-0.1,'c2':-0.1000}       # use the updated coefficients
    T = 0.3; NewCoefs = None    # original coefficients
print('BA SA at %s second'%('%3.2f'%T))
CoefTerms={'terms':(1,1,1),'NewCoefs':NewCoefs}
BAnga = BA08nga_test(T,CoefTerms)
#BAnga = BA08nga_test(T,CoefTerms)
T = -1.0
CoefTerms={'terms':(1,1,1),'NewCoefs':None}
print('BA PGA at %s second'%('%3.2f'%T))
BAnga = BA08nga_test(T,CoefTerms)
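# Direct-call sketch (values illustrative; __call__ returns the median IM plus
# the total, inter-event and intra-event standard deviations):
# model = BA08_nga()
# Sa, sigma_T, tau, sigma = model(6.0, 20.0, 760., 0.3, 0, Mech=None, Ftype='SS')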
|
nilq/baby-python
|
python
|
import permstruct
import permstruct.dag
from permstruct.lib import Permutations
def loc_max(w):
    '''
    Helper function for stack-sort and bubble-sort. Returns the index and
    value of the maximal element in w. It is assumed that w is non-empty.
    '''
m = w[0]
i = 0
c = 0
for j in w[1:]:
c = c+1
if j > m:
m = j
i = c
return i, m
def stack_sort(w):
    '''
    Takes a permutation w and performs one pass of stack-sort on it,
    returning the result as a list.
    '''
i = len(w)
if i <= 1:
return list(w)
j,J = loc_max(w)
if j == 0:
W2 = stack_sort(w[1:i])
W2.append(J)
return W2
if j == i-1:
W1 = stack_sort(w[0:i-1])
W1.append(J)
return W1
W1 = stack_sort(w[0:j])
W2 = stack_sort(w[j+1:i])
W1.extend(W2)
W1.extend([J])
return W1
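# Quick sanity checks (hand-verified): one pass of stack-sort sends 231 to 213,
# and a second pass sorts it completely.
assert stack_sort([2, 3, 1]) == [2, 1, 3]
assert stack_sort(stack_sort([2, 3, 1])) == [1, 2, 3]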
# Since we usually don't want overlays:
overlays = False
#------------------------------------------------#
# Stack-sorting
#-- 1-pass --#
# The perm_props are of course the same
# perm_prop = lambda p: p.avoids([2,3,1])
# perm_prop = lambda p: stack_sort(p) == list(range(1, len(p)+1))
# perm_bound = 7
# # inp_dag = permstruct.dag.N_P_X(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
#-- 2-passes --#
# No luck with any of the dags below
perm_prop = lambda p: stack_sort(stack_sort(p)) == list(range(1, len(p)+1))
perm_bound = 7
# inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# inp_dag = permstruct.dag.classic_avoiders_length_3(perm_prop, perm_bound)
inp_dag = permstruct.dag.len_3_pairs(perm_prop, perm_bound)
max_rule_size = (3, 3)
max_non_empty = 4
max_rules = 100
ignored = 1
#------------------------------------------------#
if not overlays:
permstruct.exhaustive(perm_prop,
perm_bound,
inp_dag,
max_rule_size,
max_non_empty,
max_rules,
ignore_first = ignored)
else:
    # NOTE: overlay_dag, max_overlay_cnt and max_overlay_size are not defined
    # anywhere in this script; they must be set before enabling overlays.
    permstruct.exhaustive_with_overlays(perm_prop,
perm_bound,
inp_dag,
max_rule_size,
max_non_empty,
max_rules,
overlay_dag,
max_overlay_cnt,
max_overlay_size,
min_rule_size=(1,1))
|
nilq/baby-python
|
python
|
from Othello.Cell import Cell
from .Decorator import Decorator
class Decorator_MaximizeOwnDisc(Decorator):
def _scoring(self, case):
score = {Cell.BLACK: case.blackDisc,
Cell.WHITE: case.whiteDisc}[self._discType]
return (self._rate * score) + self._agent._scoring(case)
def _prepare(self, cases):
self._agent._prepare(cases)
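# Composition sketch (an assumption based on the Decorator import; the base
# Decorator's constructor is not shown in this file):
# agent = Decorator_MaximizeOwnDisc(inner_agent, ...)
# _scoring adds self._rate * (own disc count in `case`) to the wrapped agent's
# score, so several Decorator_* heuristics can be stacked around one agent.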
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright 2013 Netflix, Inc.
"""Utility classes
"""
from contextlib import contextmanager
import logging
import signal
import sys
class TimeoutError(Exception):
"""Timeout Error"""
pass
@contextmanager
def timeout(seconds, error_message="Timeout"):
"""Timeout context manager using SIGALARM."""
def _handle_timeout(signum, frame): # pylint:disable=unused-argument,missing-docstring
raise TimeoutError(error_message)
if seconds > 0:
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
yield
finally:
if seconds > 0:
signal.alarm(0)
def undecorate_func(func, decorators=None):
"""Finc the actual func behind any number of decorators
"""
if decorators is None:
decorators = []
if hasattr(func, "original_func"):
decorators.append(func)
return undecorate_func(getattr(func, "original_func"), decorators)
else:
return func, decorators
def try_import(module_name):
"""Attempt to import the given module (by name), returning a tuple (True, module object) or (False,None) on ImportError"""
try:
module = __import__(module_name)
return True, module
except ImportError:
return False, None
def call_chain(chain, *args, **kwargs):
if len(chain) == 1:
return chain[0](*args, **kwargs)
elif len(chain) == 2:
return chain[1](lambda: chain[0](*args, **kwargs))
elif len(chain) == 3:
return chain[2](lambda: chain[1](lambda: chain[0](*args, **kwargs)))
else:
raise Exception("call_chain is a hack and doesn't support chains longer than 3")
def profiling_wrapper(func):
import cProfile
import io
import pstats
pr = cProfile.Profile()
pr.enable()
try:
func()
finally:
pr.disable()
s = io.StringIO()
sortby = "cumulative"
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
def pdb_wrapper(func):
try:
return func()
except Exception:
import pdb
import traceback
etype, value, tb = sys.exc_info()
logging.info("Top level exception caught, entering debugger")
traceback.print_exc()
pdb.post_mortem(tb)
raise
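if __name__ == "__main__":
    # Minimal sketch exercising timeout(): the 2-second sleep trips the
    # 1-second alarm. SIGALRM only fires on Unix and in the main thread.
    import time
    try:
        with timeout(1, "slept too long"):
            time.sleep(2)
    except TimeoutError as exc:
        print(exc)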
|
nilq/baby-python
|
python
|
from typing import Any, List
from PySide6.QtGui import QColor
from PySide6.QtWidgets import QComboBox
from .ui import ColorPicker
class Optionable:
def __init__(self, **options):
self.options = options
def add_options(self, **options):
self.options.update(options)
    def set_option(self, key: str, value: Any):
        # NOTE: get_option reads .value on the stored object, so values are
        # expected to be Option instances rather than raw values.
        self.options[key] = value
    def get_option(self, key: str) -> Any:
        return self.options[key].value
    def get_options(self, *keys: str) -> List[Any]:
return [self.get_option(key) for key in keys]
class Option:
def __init__(self, name: str, default: Any, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = name
self._value = default
self.default = default
@property
def value(self):
return self._value
def to_str(self):
return str(self.value)
def real_time_init(self, *args, **kwargs):
pass
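# Usage sketch (an assumption: Optionable is meant to hold Option instances,
# since get_option reads .value on the stored object):
# opts = Optionable(line_width=Option("line_width", 2))
# opts.get_option("line_width")   # -> 2
# opts.get_options("line_width")  # -> [2]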
|
nilq/baby-python
|
python
|
import os
from django.conf import settings
DEBUG = False
TEMPLATE_DEBUG = True
DATABASES = settings.DATABASES
# Update database configuration with $DATABASE_URL.
import dj_database_url
# import os
# import psycopg2
# import urllib.parse as up
# up.uses_netloc.append("postgres")
# url = up.urlparse(os.environ["DATABASE_URL"])
# conn = psycopg2.connect(
# database=url.path[1:],
# user=url.username,
# password=url.password,
# host=url.hostname,
# port=url.port
# )
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# DATABASES['default'] = dj_database_url.config(conn_max_age=500)
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# # PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static_root')
# # STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
# STATIC_URL = '/static/'
# # Extra places for collectstatic to find static files.
# STATICFILES_DIRS = (
# os.path.join(PROJECT_ROOT, 'static'),
# )
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
|
nilq/baby-python
|
python
|
import os
broker_url = os.environ['REDIS_URL']
result_backend = os.environ['REDIS_URL']
broker_transport_options = {
'max_connections': 20
}
task_serializer = 'json'
result_serializer = 'json'
accept_content = ['json']
task_routes = {
# '{{cookiecutter.code_name}}.apps.app-name.tasks.*': {'queue': '{{cookiecutter.code_name}}'}
}
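# Loading sketch (assumes this file is importable as `celeryconfig`; the
# lowercase setting names above are Celery's new-style configuration):
# from celery import Celery
# app = Celery('{{cookiecutter.code_name}}')
# app.config_from_object('celeryconfig')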
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import numpy as np
from scipy.io.matlab import loadmat
from sklearn.metrics import pairwise_distances
import os
_ROOT = os.path.abspath(os.path.dirname(__file__))
lps_neighbor_shifts = {
'a': np.array([ 0, -1, 0]),
'ai': np.array([ 0, -1, -1]),
'as': np.array([ 0, -1, 1]),
'i': np.array([ 0, 0, -1]),
'l': np.array([1, 0, 0]),
'la': np.array([ 1, -1, 0]),
'lai': np.array([ 1, -1, -1]),
'las': np.array([ 1, -1, 1]),
'li': np.array([ 1, 0, -1]),
'lp': np.array([1, 1, 0]),
'lpi': np.array([ 1, 1, -1]),
'lps': np.array([1, 1, 1]),
'ls': np.array([1, 0, 1]),
'p': np.array([0, 1, 0]),
'pi': np.array([ 0, 1, -1]),
'ps': np.array([0, 1, 1]),
'r': np.array([-1, 0, 0]),
'ra': np.array([-1, -1, 0]),
'rai': np.array([-1, -1, -1]),
'ras': np.array([-1, -1, 1]),
'ri': np.array([-1, 0, -1]),
'rp': np.array([-1, 1, 0]),
'rpi': np.array([-1, 1, -1]),
'rps': np.array([-1, 1, 1]),
'rs': np.array([-1, 0, 1]),
's': np.array([0, 0, 1])}
neighbor_names = sorted(lps_neighbor_shifts.keys())
ras_neighbor_shifts = {
'a': np.array([0, 1, 0]),
'ai': np.array([ 0, 1, -1]),
'as': np.array([0, 1, 1]),
'i': np.array([ 0, 0, -1]),
'l': np.array([-1, 0, 0]),
'la': np.array([-1, 1, 0]),
'lai': np.array([-1, 1, -1]),
'las': np.array([-1, 1, 1]),
'li': np.array([-1, 0, -1]),
'lp': np.array([-1, -1, 0]),
'lpi': np.array([-1, -1, -1]),
'lps': np.array([-1, -1, 1]),
'ls': np.array([-1, 0, 1]),
'p': np.array([ 0, -1, 0]),
'pi': np.array([ 0, -1, -1]),
'ps': np.array([ 0, -1, 1]),
'r': np.array([1, 0, 0]),
'ra': np.array([1, 1, 0]),
'rai': np.array([ 1, 1, -1]),
'ras': np.array([1, 1, 1]),
'ri': np.array([ 1, 0, -1]),
'rp': np.array([ 1, -1, 0]),
'rpi': np.array([ 1, -1, -1]),
'rps': np.array([ 1, -1, 1]),
'rs': np.array([1, 0, 1]),
's': np.array([0, 0, 1])}
def get_dsi_studio_ODF_geometry(odf_key):
"""
Returns the default DSI studio odf vertices and odf faces for a
specified odf resolution
Parameters:
-----------
odf_key:str
Must be 'odf4', 'odf5', 'odf6', 'odf8', 'odf12' or 'odf20'
Returns:
--------
odf_vertices, odf_faces: np.ndarray
odf_vertices is (n,3) coordinates of the coordinate on the unit sphere and
odf_faces is an (m,3) array of triangles between ``odf_vertices``
Note:
------
Here are the properties of each odf resolution
Resolution: odf4
=====================
Unique angles: 81
N triangles: 160
    Angular Resolution: 17.216 +- 1.119
Resolution: odf5
=====================
Unique angles: 126
N triangles: 250
    Angular Resolution: 13.799 +- 0.741
Resolution: odf6
=====================
Unique angles: 181
N triangles: 360
    Angular Resolution: 11.512 +- 0.635
Resolution: odf8
=====================
Unique angles: 321
N triangles: 640
    Angular Resolution: 8.644 +- 0.562
Resolution: odf12
=====================
Unique angles: 721
N triangles: 1440
    Angular Resolution: 5.767 +- 0.372
Resolution: odf20
=====================
Unique angles: 2001
N triangles: 4000
    Angular Resolution: 3.462 +- 0.225
"""
m = loadmat(os.path.join(_ROOT,"data/odfs.mat"))
odf_vertices = m[odf_key + "_vertices"].T
odf_faces = m[odf_key + "_faces"].T
return odf_vertices, odf_faces
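# Usage sketch (requires the bundled data/odfs.mat):
# odf_vertices, odf_faces = get_dsi_studio_ODF_geometry("odf8")
# odf_vertices holds one (x, y, z) unit vector per row; odf_faces indexes
# triangles into those rows.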
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'::
>>> angle_between((1, 0, 0), (0, 1, 0))
1.5707963267948966
>>> angle_between((1, 0, 0), (1, 0, 0))
0.0
>>> angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) * 180 / np.pi
def compute_angular_probability(odf_vertices, ANGLE_MAX):
"""
Computes and returns a matrix where the (i,j) entry is the probability of
taking a step in direction j after a step in direction i
Parameters:
----------
odf_vertices: vector of tuples that specify the odf directions
ANGLE_MAX:float that specifies the maximum allowed distance between two
angles for one step to follow another
Returns:
-------
angular_probabilities: a matrix of floats where the i,j th
entry gives the probability of taking a step in direction j after a step in
direction i
The degree to which the similarity of angles dictate the probability can be
controlled through ANGULAR_SIM_STRENGTH
"""
ANGULAR_SIM_STRENGTH = 4
angular_probabilities = np.zeros((len(odf_vertices), len(odf_vertices)))
for i, angle_1 in enumerate(odf_vertices):
for j, angle_2 in enumerate(odf_vertices):
similarity = angle_between(angle_1,angle_2)
if similarity >= ANGLE_MAX:
angular_probabilities[i][j] = 0
else:
score = (180+similarity)/(180-similarity)
angular_probabilities[i][j] = (1./score)**ANGULAR_SIM_STRENGTH
angular_probabilities[i] = angular_probabilities[i]/angular_probabilities[i].sum()
return angular_probabilities
def get_transition_analysis_matrices(odf_order, angle_max,
angle_weight="flat", angle_weighting_power=1.):
"""
Convenience function that creates and returns all the necessary matrices
for iodf1 and iodf2
Parameters:
-----------
odf_order: "odf4", "odf6", "odf8" or "odf12"
A DSI Studio ODF order
angle_max: Maximum turning angle in degrees
angle_weights: "flat" or "weighted"
angle_weighting_order: int
How steep should the angle weights be? Only used when angle_weights=="weighted"
Returns:
---------
odf_vertices: np.ndarray (N,3)
Coordinates on the ODF sphere
prob_angles_weighted: np.ndarray(N/2,N/2)
Each i,j in this array is the probability of taking step j given that the
last step was i. The rows sum to 1.
"""
odf_vertices, odf_faces = get_dsi_studio_ODF_geometry(odf_order)
n_unique_vertices = odf_vertices.shape[0] // 2
angle_diffs = pairwise_distances(odf_vertices,metric=angle_between)
compatible_angles = angle_diffs < angle_max
if angle_weight == "flat":
prob_angles_weighted = \
            compatible_angles.astype(float) / compatible_angles.sum(1)[:,np.newaxis]
elif angle_weight == "weighted":
prob_angles_weighted = ((180-angle_diffs)/(180+angle_diffs))**angle_weighting_power
# Zero out the illegal transitions
prob_angles_weighted = prob_angles_weighted * compatible_angles
prob_angles_weighted = prob_angles_weighted / prob_angles_weighted.sum(1)[:,np.newaxis]
# Collapse to n unique by n unique matrix
prob_angles_weighted = prob_angles_weighted[:n_unique_vertices, :n_unique_vertices] + prob_angles_weighted[n_unique_vertices:, :n_unique_vertices]
return odf_vertices, np.asfortranarray(prob_angles_weighted)
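# Usage sketch (parameters illustrative; requires the bundled data/odfs.mat):
# verts, P = get_transition_analysis_matrices("odf8", angle_max=35.,
#                                             angle_weight="weighted",
#                                             angle_weighting_power=2.)
# Each row of P sums to 1 over the unique half-sphere directions.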
def weight_transition_probabilities_by_odf(odf, weight_matrix):
"""
Creates a matrix where i,j is the probability that angle j will be taken
after angle i, given the weights in odf.
"""
prob_angles_weighted = np.tile(odf[:,np.newaxis],
(weight_matrix.shape[1] // odf.shape[0], weight_matrix.shape[0])).T * weight_matrix
with np.errstate(divide='ignore', invalid='ignore'):
mat = prob_angles_weighted / prob_angles_weighted.sum(1)[:,np.newaxis]
return np.nan_to_num(mat)
def compute_weights_as_neighbor_voxels(odfs, weight_matrix):
"""
Creates a matrix where each row is a voxel and each column (j) contains the
probability of creating a trackable direction given you entered the voxel
with direction j.
Parameters:
------------
odfs: np.ndarray (n voxels, n unique angles)
odf data. MUST SUM TO 1 ACROSS ROWS
weight matrix: np.ndarray (n unique angles, n unique angles)
Conditional angle probabilities such as those returned by
``get_transition_analysis_matrices``. ALL ROWS MUST SUM TO 1
Returns:
--------
weights: np.ndarray (n voxels, n unique angles)
matrix where i,j is the probability of creating a trackable step after
entering voxel i by angle j
"""
return np.dot(odfs, weight_matrix)
def get_area_3d(v11, v12, v21,v22,direction,step_size=0.5):
    ''' 3D computation of the region in v1 from which a step of size step_size
    in the given direction will land in the region defined by v2; returns the
    overlap volume together with its start and end corners.
    '''
def overlap(min1, max1, min2, max2):
return max(0, min(max1, max2) - max(min1, min2)), max(min1,min2), min(max1,max2)
x_min = v21[0] - step_size*direction[0]
x_max = v22[0] - step_size*direction[0]
x_delta,x_start,x_end = overlap(v11[0],v12[0],x_min,x_max)
y_min = v21[1] - step_size*direction[1]
y_max = v22[1] - step_size*direction[1]
y_delta,y_start,y_end = overlap(v11[1],v12[1],y_min,y_max)
z_min = v21[2] - step_size*direction[2]
z_max = v22[2] - step_size*direction[2]
z_delta,z_start,z_end = overlap(v11[2],v12[2],z_min,z_max)
return x_delta*y_delta*z_delta, [x_start, y_start, z_start],[x_end,y_end,z_end]
|
nilq/baby-python
|
python
|
from post_processing_class import PostProcess
from post_processing_class import update_metrics_in_report_json
from post_processing_class import read_limits
from post_processing_class import check_limits_and_add_to_report_json
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import random
import time
import pytest
from fixture.application import Application
from fixture.orm import ORMFixture
from model.contact import Contact
from model.group import Group
@pytest.mark.skip(reason="XAMPP 8 ver")
def test_add_contact_to_group(app: Application, orm: ORMFixture):
    if len(orm.get_group_list()) == 0:
        app.group.create(Group(name="initial_group"))
groups = orm.get_group_list()
random_group = random.choice(groups)
contacts_not_in_group = orm.get_contacts_not_in_group(random_group)
if len(contacts_not_in_group) == 0:
new_contact = Contact(firstname="inital_firstname",
lastname="inital_lastname")
app.contact.create(new_contact)
contacts_not_in_group = orm.get_contacts_not_in_group(random_group)
contact_to_attach = contacts_not_in_group[0]
app.contact.attach_contact_to_group(contact_to_attach, random_group)
time.sleep(2)
contacts_in_group = orm.get_contacts_in_group(random_group)
    assert contact_to_attach in contacts_in_group
|
nilq/baby-python
|
python
|
import unittest
from collatz import collatz_sequence as collatz
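# For reference, a minimal collatz_sequence consistent with these tests
# (the real implementation lives in the collatz module; this is a sketch):
# def collatz_sequence(n):
#     seq = [n]
#     while n != 1:
#         n = 3 * n + 1 if n % 2 else n // 2
#         seq.append(n)
#     return seq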
class CollatzTestCase(unittest.TestCase):
def test_base_case(self):
base_case = collatz(1)
self.assertListEqual(base_case, [1])
def test_3(self):
sequence = collatz(3)
self.assertListEqual(sequence, [3, 10, 5, 16, 8, 4, 2, 1])
def test_5(self):
sequence = collatz(5)
self.assertListEqual(sequence, [5, 16, 8, 4, 2, 1])
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
from django.urls import re_path
from . import views
app_name = "curator"
urlpatterns = [
re_path(r"^upload$", views.UploadSpreadSheet.as_view(), name="upload_file"),
]
|
nilq/baby-python
|
python
|
"""
Anisha Kadri 2017
ak4114@ic.ac.uk
A Module containing methods to create networks from different models.
1) For pure preferential attachement:-
pref_att(N, m)
2) For random attachment:-
rand_att(N,m)
3) For a mixture of the two, attachment via random walk:-
walk_att(N,m,L)
References
----------
[1] A. L. Barabási and R. Albert "Emergence of scaling in
random networks", Science 286, pp 509-512, 1999.
"""
import networkx as nx
import random
import math
def pref_att(N, m, seed=None):
"""Returns a graph that is created using the Barabasi-Albert Model,
of N nodes in total and a node with m edges added at each time increment.
Parameters
----------
n = total number of nodes
m = number of edges attached to each new node, or degree of new node.
(value must be < N)
seed = optional argument, initialises random number generator to a starting state.
Returns
-------
A Barabasi Albert Graph, with pure preferential attachment.
"""
    #this ensures that the maximum degree is always less than the number of nodes
    if m >= N:
        raise ValueError("m must be less than N")
    if m < 1:
        raise ValueError("graph growth is sub-critical; degree of new node cannot be 0")
# Intialises the pseudo-random number generator, allowing result replication.
random.seed(seed)
# Creates new graph of m nodes, of equal degree
nodes = list(range(m))
G = nx.complete_graph(m)
G.name = "Graph with N = %s, m = %s"%(N,m)
# Target nodes for new edges
attach_list = nodes
    # Maintains a list of nodes for random sampling,
    # a concatenated edge list;
    # thus the number of instances of each node in the list is proportional to its degree
    # (i.e. the list has k_i instances of node i)
node_list=[]
for i in nodes:
node_list.extend([i]*m)
N_tot = m # N_tot = No. of nodes in network, also index numbering for new node
while N_tot < N:
new_stubs = [N_tot]*m #create new stubs
new_edges = zip(new_stubs,attach_list) #create new edges between chosen nodes
G.add_edges_from(new_edges)
#add new edges to the list
node_list.extend(attach_list)
node_list.extend(new_stubs)
# m nodes are chosen from the edge_list to form new targets.
attach_list = set() # making this a set ensures that edges added are all unique (not a multigraph)
while len(attach_list)< m:
random_node =random.choice(node_list)
attach_list.add(random_node)
N_tot += 1
attach_list = list(attach_list)
return G
def rand_att(N,m, seed=None):
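    """Returns a graph grown by uniformly random attachment: each new node
    is joined to m nodes chosen uniformly at random from the existing graph.
    Parameters N, m and seed behave as in pref_att.
    """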
    if m >= N:
        raise ValueError("m must be less than N")
    if m < 1:
        raise ValueError("graph growth is sub-critical; degree of new node cannot be 0")
# Intialises the pseudo-random number generator, allowing result replication.
random.seed(seed)
# Creates new graph of m nodes, and no edges
G = nx.generators.classic.empty_graph(m)
G.name = "Graph with N = %s, m = %s"%(N,m)
# Target nodes for new edges
    attach_list = list(nx.nodes(G))
N_tot = m # N_tot = No. of nodes in network, also index numbering for new node
while N_tot < N:
new_stubs = [N_tot]*m #create new stubs
new_edges = zip(new_stubs,attach_list) #create new edges between chosen nodes
G.add_edges_from(new_edges)
        node_list = list(nx.nodes(G))
# m nodes are chosen at random from the node_list to form new targets.
attach_list =random.sample(node_list, m)
N_tot += 1
return G
def random_walk(N,m, L, seed = None):
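    """Returns a graph grown by random-walk attachment: each target is found
    by picking a node uniformly at random and walking L steps along edges,
    which approximates preferential attachment for L > 0.
    Parameters N, m and seed behave as in pref_att.
    """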
    if m >= N:
        raise ValueError("m must be less than N")
    if m < 1:
        raise ValueError("graph growth is sub-critical; degree of new node cannot be 0")
# Intialises the pseudo-random number generator, allowing result replication.
random.seed(seed)
# Creates new graph of m nodes, of equal degree
G = nx.complete_graph(m)
nodes = list(range(m))
# Target nodes for new edges
attach_list = nodes
N_tot = m # N_tot = No. of nodes in network, also index numbering for new node
while N_tot < N:
new_stubs = [N_tot]*m #create new stubs
new_edges = zip(new_stubs,attach_list) #create new edges between chosen nodes
G.add_edges_from(new_edges)
        node_list = list(nx.nodes(G))
# m nodes are chosen from the edge_list to form new targets.
attach_list = set() # making this a set ensures that edges added are all unique (not a multigraph)
random_list = set()
#uniformly choose start point of walk
while len(random_list)< m:
random_node =random.choice(node_list)
random_list.add(random_node)
N_tot += 1
        #take a random walk of length L from each start point
        for i in random_list:
            node = i
            for _ in range(L):
                neighbours = list(G.neighbors(node))
                node = random.choice(neighbours)
            attach_list.add(node)
attach_list = list(attach_list)
return G
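if __name__ == '__main__':
    # Smoke test with illustrative parameters: grow a 100-node graph attaching
    # m = 3 edges per new node; the seed makes the run reproducible.
    G = pref_att(100, 3, seed=42)
    print(G.number_of_nodes(), G.number_of_edges())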
|
nilq/baby-python
|
python
|
import queue
import sys, re
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians, gcd
from itertools import accumulate, permutations, combinations, product, groupby, combinations_with_replacement
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from bisect import bisect, bisect_left
from heapq import heappush, heappop
from functools import reduce
def input():
return sys.stdin.readline().strip()
def INT():
return int(input())
def MAP():
return map(int, input().split())
def LIST():
return list(map(int, input().split()))
def ZIP(n):
return zip(*(MAP() for _ in range(n)))
sys.setrecursionlimit(10**9)
INF = float('inf')
mod = 10**9 + 7
YES = 'YES'
NO = 'NO'
class Node:
def __init__(self, n):
self.n = n
self.children = []
def add_child(self, child):
self.children = self.children + [child]
def get_find():
visited = set()
def find(cnt: int, dist: Node, n: Node):
cur_c = INF
for c in n.children:
if c in visited:
continue
visited.add(c)
if c.n == dist.n:
return cnt
else:
c = find(cnt + 1, dist, c)
cur_c = min(cur_c, c)
return cur_c
return find
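# Note: Node and get_find above are not used by resolve(). resolve() 2-colors
# the tree with a BFS; towns i and j share a color exactly when their distance
# is even, so the meeting point is a town ("Town") rather than mid-road ("Road").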
def resolve():
N, Q = MAP()
G = [[] for _ in range(N)]
for i in range(N - 1):
a, b = map(int, input().split())
G[a - 1].append(b - 1)
G[b - 1].append(a - 1)
color = [-1] * N
color[0] = 0
que = queue.Queue()
que.put(0)
while not que.empty():
t = que.get()
for g in G[t]:
if color[g] == -1:
color[g] = 1 - color[t]
que.put(g)
for _ in range(Q):
i, j = MAP()
if color[i - 1] == color[j - 1]:
print("Town")
else:
print("Road")
if __name__ == "__main__":
resolve()
|
nilq/baby-python
|
python
|
from typing import Dict
from .logger import Logger
from google.cloud.logging_v2.client import Client
from google.cloud.logging_v2.resource import Resource
class StackDriverLogger(Logger):
def __init__(self, project_id, service_name, region):
self.client = Client(project=project_id)
self.project_id = project_id
self.service_name = service_name
self.region = region
def __get_resource(self):
return Resource(
type="cloud_run_revision",
labels={
"project_id": self.project_id,
"service_name": self.service_name,
"location": self.region,
})
def __log(self, severity: str, message: str, extra: Dict = None, exc_info=None):
trace = self.get_trace_id()
if extra or exc_info:
struct = {"message": message}
if extra:
struct["extra"] = extra
if exc_info:
struct["exception"] = exc_info
struct["serviceContext"] = {
"service": self.service_name
}
struct["@type"] = "type.googleapis.com/google.devtools.clouderrorreporting.v1beta1.ReportedErrorEvent"
self.client.logger(self.service_name).log_struct(struct, severity=severity, resource=self.__get_resource(), trace=trace)
else:
self.client.logger(self.service_name).log_text(message, severity=severity, resource=self.__get_resource(), trace=trace)
def debug(self, message: str, extra: Dict = None):
self.__log("DEBUG", message, extra=extra)
def info(self, message: str, extra: Dict = None):
self.__log("INFO", message, extra)
def warn(self, message: str, extra: Dict = None):
self.__log("WARNING", message, extra)
def error(self, message: str, extra: Dict = None, exc_info=None):
self.__log("ERROR", message, extra=extra, exc_info=exc_info)
|
nilq/baby-python
|
python
|
import unittest
from utils.transliteration import transliterate
class TestTransliterate(unittest.TestCase):
def test_english_string(self):
original = 'The quick brown fox jumps over the lazy dog'
result = transliterate(original)
self.assertEqual(original, result)
def test_english_string_with_punctuation_marks(self):
original = 'Hello, world!'
result = transliterate(original)
self.assertEqual(original, result)
def test_russian_string_with_punctuation_marks(self):
result = transliterate('Привет, как дела?')
self.assertEqual('Privet, kak dela?', result)
def test_russian_string_with_soft_signs(self):
result = transliterate('подъезд ель')
self.assertEqual("pod'ezd el'", result)
def test_russian_string_with_map_into_multiple_letters(self):
result = transliterate('Щелкунчик и друзья')
self.assertEqual("Schelkunchik i druz'ya", result)
def test_russian_string_with_all_letters(self):
result = transliterate('Съешь ещё этих мягких французских булок, да выпей чаю')
self.assertEqual("S'esh' eschyo etih myagkih frantsuzskih bulok, da vypey chayu", result)
def test_german_string_with_special_characters(self):
result = transliterate('Äpfel schöne Grüße')
self.assertEqual('Aepfel schoene Gruesse', result)
def test_greek_string(self):
result = transliterate('Θράσυλλος Ἑκατώνυµος καρακτηρ ῥυθμος')
self.assertEqual('Thrasyllos Ekatonymos karakter rythmos', result)
def test_remove_accents(self):
result = transliterate('Montréal, Mère, Françoise')
self.assertEqual('Montreal, Mere, Francoise', result)
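# Allow running this file directly, matching the other test modules here.
if __name__ == '__main__':
    unittest.main()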
|
nilq/baby-python
|
python
|
__author__ = 'Gaston C. Hillar'
import pyupm_th02 as upmTh02
import pyupm_i2clcd as upmLcd
import pyupm_servo as upmServo
import time
import paho.mqtt.client as mqtt
import json
class TemperatureServo:
def __init__(self, pin):
self.servo = upmServo.ES08A(pin)
self.servo.setAngle(0)
def print_temperature(self, temperature_fahrenheit):
angle = temperature_fahrenheit
if angle < 0:
angle = 0
elif angle > 180:
angle = 180
self.servo.setAngle(angle)
class Oled:
# The I2C address for the OLED display
oled_i2c_address = 0x3C
    def __init__(self, bus):
self.oled = upmLcd.SSD1327(
bus,
self.__class__.oled_i2c_address)
self.oled.clear()
def print_line(self, row, message):
self.oled.setCursor(row, 0)
self.oled.setGrayLevel(12)
self.oled.write(message)
class TemperatureAndHumidityOled(Oled):
def print_temperature(self, temperature_fahrenheit, temperature_celsius):
self.oled.clear()
self.print_line(0, "Temperature")
self.print_line(2, "Fahrenheit")
self.print_line(3, "{:5.2f}".format(temperature_fahrenheit))
self.print_line(5, "Celsius")
self.print_line(6, "{:5.2f}".format(temperature_celsius))
def print_humidity(self, humidity):
self.print_line(8, "Humidity")
self.print_line(9, "Level")
self.print_line(10, "{0}%".format(humidity))
class TemperatureAndHumiditySensor:
def __init__(self, bus):
self.th02_sensor = upmTh02.TH02(bus)
self.temperature_celsius = 0.0
self.temperature_fahrenheit = 0.0
self.humidity = 0.0
def measure_temperature_and_humidity(self):
# Retrieve the temperature expressed in Celsius degrees
temperature_celsius = self.th02_sensor.getTemperature()
self.temperature_celsius = temperature_celsius
self.temperature_fahrenheit = \
(temperature_celsius * 9.0 / 5.0) + 32.0
# Retrieve the humidity
self.humidity = self.th02_sensor.getHumidity()
class MessageTopic:
command_key = "command"
successfully_processed_command_key = "successfully_processed_command"
# Replace with your own topic name
topic = "iot-python-gaston-hillar/temperature"
active_instance = None
def __init__(self, temperature_servo, oled):
self.temperature_servo = temperature_servo
self.oled = oled
self.client = mqtt.Client()
self.client.on_connect = MessageTopic.on_connect
self.client.on_message = MessageTopic.on_message
self.client.connect(host="iot.eclipse.org",
port=1883,
keepalive=60)
MessageTopic.active_instance = self
def loop(self):
self.client.loop()
@staticmethod
def on_connect(client, userdata, flags, rc):
print("Connected to the {0} topic".
format(MessageTopic.topic))
subscribe_result = client.subscribe(MessageTopic.topic)
publish_result_1 = client.publish(
topic=MessageTopic.topic,
payload="Listening to messages in the Intel Galileo Gen 2 board")
@staticmethod
def on_message(client, userdata, msg):
if msg.topic == MessageTopic.topic:
print("I've received the following message: {0}".format(str(msg.payload)))
try:
message_dictionary = json.loads(msg.payload)
if MessageTopic.command_key in message_dictionary:
if message_dictionary[MessageTopic.command_key] == "print_temperature_fahrenheit":
MessageTopic.active_instance.temperature_servo.print_temperature(
message_dictionary["temperature_fahrenheit"])
MessageTopic.active_instance.publish_response_message(
message_dictionary)
elif message_dictionary[MessageTopic.command_key] == "print_information_message":
MessageTopic.active_instance.oled.print_line(
11, message_dictionary["text"])
MessageTopic.active_instance.publish_response_message(message_dictionary)
except ValueError:
# msg is not a dictionary
# No JSON object could be decoded
pass
def publish_response_message(self, message):
response_message = json.dumps({
self.__class__.successfully_processed_command_key:
message[self.__class__.command_key]})
result = self.client.publish(topic=self.__class__.topic,
payload=response_message)
return result
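# Example JSON payloads on_message understands (values illustrative):
# {"command": "print_temperature_fahrenheit", "temperature_fahrenheit": 90}
# {"command": "print_information_message", "text": "Hello"}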
if __name__ == "__main__":
temperature_and_humidity_sensor = \
TemperatureAndHumiditySensor(0)
oled = TemperatureAndHumidityOled(0)
temperature_servo = TemperatureServo(3)
message_topic = MessageTopic(temperature_servo, oled)
while True:
temperature_and_humidity_sensor.\
measure_temperature_and_humidity()
oled.print_temperature(
temperature_and_humidity_sensor.temperature_fahrenheit,
temperature_and_humidity_sensor.temperature_celsius)
oled.print_humidity(
temperature_and_humidity_sensor.humidity)
print("Ambient temperature in degrees Celsius: {0}".
format(temperature_and_humidity_sensor.temperature_celsius))
print("Ambient temperature in degrees Fahrenheit: {0}".
format(temperature_and_humidity_sensor.temperature_fahrenheit))
print("Ambient humidity: {0}".
format(temperature_and_humidity_sensor.humidity))
# Sleep 10 seconds (10000 milliseconds) but process messages every 1 second
for i in range(0, 10):
message_topic.loop()
time.sleep(1)
|
nilq/baby-python
|
python
|