from structure_generator.structure import Structure
from structure_generator.argument import Argument
from minecraft_environment.position import Position
from minecraft_environment.minecraft import fill, clone, setblock, base_block, redstone_dust, redstone_torch, repeater, air
class Decoder(Structure):
def __init__(self):
pass
def register_arguments(self):
return [
Argument("FILE", "Where's the source file?"),
Argument("FACING", "What direction are you facing?", ['north','south','east','west']),
Argument("OUTPUT_SIDE", "What side do the outputs build towards?", ['left', 'right']),
Argument("BUILD_TO", "What direction is the structure being built towards?", ['left', 'right']),
Argument("OFFSET", "Where is the structure being moved to?"),
]
def generate_build(self, file, facing, outputs_to, build_to, offset):
with open(file.replace('"','')) as f:
table = f.readlines()
self.width = len(table[0].strip())*2-1
self.height = 7
self.length = len(table)*2-1
self.facing = {'east':0,'west':2,'south':1,'north':3}[facing]
self.build_to = build_to
self.offset = int(offset.split(',')[0]),int(offset.split(',')[1]),int(offset.split(',')[2])
self.right = outputs_to == 'right'
self.modules = self._parse_modules(table)
        self._solve_modules()
self.moduleLocationX = [0] * 8
self.moduleLocationY = [0] * 8
self.commands = []
self._build_outputs()
self._build_modules()
self._place_repeaters()
if self.build_to == 'left': self._translate(0,0,-self.width)
self._rotate(self.facing)
self._translate(*self.offset)
def _parse_modules(self, table):
modules = [[0 for j in range(len(table[0].strip()))] for i in range(len(table))]
expected_width = len(table[0].strip())
for x, row in enumerate(table):
if len(row.strip()) != expected_width:
raise Exception("Uneven row lengths")
for z, bit in enumerate(row.strip()):
if bit == '0': modules[x][-z-1] = 2
elif bit == '1': modules[x][-z-1] = 1
elif bit in ['x','X','-']: modules[x][-z-1] = 0
else: raise Exception("Unrecognized character {}".format(bit))
return modules
    def _solve_modules(self):
for x in range(len(self.modules)-2, 0, -1):
for z in range(len(self.modules[x])-1):
if self.modules[x][z] == 2 and self.modules[x-1][z] == 2:
                    self.modules[x-1][z] = 0
                    self.modules[x][z] = 3
for x in range(len(self.modules)-2, 0, -1):
for z in range(len(self.modules[x])-1):
if self.modules[x][z] == 3 and self.modules[x][z+1] == 3:
self.modules[x][z] = 6
self.modules[x][z+1] = 7
self.modules[x-1][z] = 4
self.modules[x-1][z+1] = 5
def _build_output_lane(self, Y):
        length = len(self.modules[0]) * 2
self.commands.append(fill(Position(length + 2, -4, Y + 1), Position(2, -4, Y + 1), base_block()))
self.commands.append(fill(Position(length + 2, -3, Y + 1), Position(2, -3, Y + 1), redstone_dust()))
torchX = length + 3
if self.right: torchX = 1
torchDir = '+x'
if self.right: torchDir = '-x'
#self.commands.append(setblock(Position(torchX, -4, Y + 1), redstone_torch(torchDir)))
def _copy_output_lanes(self, count, Y):
        length = len(self.modules[0]) * 2
        depth = count * 2 - 1
self.commands.append(clone(
Position(1, -3, 2),
Position(length + 3, -4, 2 + depth),
Position(1, -4, Y)))
def _build_outputs(self):
self._build_output_lane(2)
        availableLanes = 1
        leftToBuild = len(self.modules) - 1
        y = 1
while leftToBuild > 0:
            toCopy = min(availableLanes, leftToBuild)
            self._copy_output_lanes(toCopy, y * 2 + 2)
            availableLanes += toCopy
            leftToBuild -= toCopy
            y += toCopy
def _copy_module(self, xsrc, ysrc, xdest, ydest):
Xsrc = xsrc * 2
Ysrc = ysrc * 2
Xdest = xdest * 2
Ydest = ydest * 2
self.commands.append(clone(
Position(Xsrc, -4, Ysrc),
Position(Xsrc + 1, 1, Ysrc + 1),
Position(Xdest, -4, Ydest)))
def _copy_module_line(self, xsrc, ysrc, count, xdest, ydest):
Xsrc = xsrc * 2
Ysrc = ysrc * 2
Xdest = xdest * 2
Ydest = ydest * 2
self.commands.append(clone(
Position(Xsrc, -4, Ysrc),
Position(Xsrc + 2 * count + 1, 1, Ysrc + 1),
Position(Xdest, -4, Ydest)))
    def _build_modules(self):
        self.moduleLocationX = [0] * 8
        self.moduleLocationY = [0] * 8
        builder_function = [self._build_module_0, self._build_module_1,
                            self._build_module_2, self._build_module_3,
                            self._build_module_4, self._build_module_5,
                            self._build_module_6, self._build_module_7]
        for y in range(len(self.modules)):
            x = 0
            while x < len(self.modules[y]):
                besty = 0
                bestcount = 0
                for py in range(y - 1, -1, -1): # for every module row below this one
                    if self.modules[y][x] == self.modules[py][x]: # if the module below matches
                        px = x
                        # find the first pair of modules to the right that doesn't match
                        while px < len(self.modules[y]) and self.modules[y][px] == self.modules[py][px]:
                            px += 1
                        count = px - x - 1 # length of the matching run
                        if count > bestcount:
                            bestcount = count
                            besty = py # take the greatest count and lock in that line
                # in other words, this search finds the longest pair of matching lines,
                # once for every module position
                if bestcount > 1: # clone the line to save on some commands
                    self._copy_module_line(x + 1, besty + 1, bestcount,
                        x + 1, y + 1)
                    # advance past the cloned run (a while loop is used so the skip
                    # actually persists; reassigning a for-loop index would not)
                    x += bestcount
                    if x >= len(self.modules[y]): # skip the row if it was copied in its entirety
                        break
                module = self.modules[y][x]
                if self.moduleLocationX[module] == 0 and self.moduleLocationY[module] == 0:
                    builder_function[module](x + 1, y + 1)
                    self.moduleLocationX[module] = x + 1
                    self.moduleLocationY[module] = y + 1
                else:
                    # copy the module if an identical one was built before
                    self._copy_module(self.moduleLocationX[module], self.moduleLocationY[module], x + 1, y + 1)
                if module == 3:
                    self.commands.append(setblock(Position(x * 2 + 4, -3, y * 2 + 2), base_block()))
                x += 1
self._build_inputs()
def _build_module_0(self, x, y):
X = x * 2
Y = y * 2
self.commands.append(fill(Position(X, -2, Y), Position(X, -2, Y + 1), base_block()))
self.commands.append(fill(Position(X, -1, Y), Position(X, -1, Y + 1), redstone_dust()))
def _build_module_1(self, x, y):
self.commands.append(fill(Position(x * 2, -2, y * 2), Position(x * 2, -2, y * 2 + 1), base_block()))
self.commands.append(fill(Position(x * 2, -1, y * 2), Position(x * 2, -1, y * 2 + 1), redstone_dust()))
self.commands.append(setblock(Position(x * 2 + 1, -2, y * 2 + 1), redstone_torch('+x')))
def _build_module_2(self, x, y):
self.commands.append(setblock(Position(x * 2, -2, y * 2 + 1), base_block()))
self.commands.append(setblock(Position(x * 2, -3, y * 2), base_block()))
self.commands.append(setblock(Position(x * 2, -2, y * 2), repeater('+z')))
self.commands.append(fill(Position(x * 2, -1, y * 2), Position(x * 2, -1, y * 2 + 1), base_block()))
self.commands.append(fill(Position(x * 2, 0, y * 2), Position(x * 2, 0, y * 2 + 1), redstone_dust()))
def _build_module_3(self, x, y):
X = x * 2
Y = y * 2
self.commands.append(setblock(Position(X, -2, Y + 1), base_block()))
self.commands.append(setblock(Position(X, -3, Y), base_block()))
self.commands.append(setblock(Position(X, -1, Y + 1), redstone_dust()))
self.commands.append(setblock(Position(X, -2, Y), redstone_dust()))
self.commands.append(setblock(Position(X + 1, -4, Y), base_block()))
self.commands.append(setblock(Position(X + 1, -3, Y), repeater('+x')))
self.commands.append(setblock(Position(X + 2, -3, Y), base_block()))
def _build_module_4(self, x, y):
X = x * 2
Y = y * 2
self.commands.append(setblock(Position(X, -1, Y), base_block()))
self.commands.append(setblock(Position(X, 0, Y), redstone_dust()))
self.commands.append(fill(Position(X + 1, -2, Y + 1), Position(X, -2, Y + 1), base_block()))
self.commands.append(fill(Position(X + 1, -1, Y + 1), Position(X, -1, Y + 1), redstone_dust()))
self.commands.append(setblock(Position(X, 0, Y + 1), base_block()))
self.commands.append(setblock(Position(X, 1, Y + 1), redstone_dust()))
def _build_module_5(self, x, y):
X = x * 2
Y = y * 2
self.commands.append(setblock(Position(X, -1, Y), base_block()))
self.commands.append(setblock(Position(X, 0, Y), redstone_dust()))
self.commands.append(setblock(Position(X, -2, Y + 1), base_block()))
self.commands.append(setblock(Position(X, -1, Y + 1), redstone_dust()))
self.commands.append(setblock(Position(X, 0, Y + 1), base_block()))
self.commands.append(setblock(Position(X, 1, Y + 1), redstone_dust()))
def _build_module_6(self, x, y):
X = x * 2
Y = y * 2
self.commands.append(fill(Position(X, -3, Y), Position(X, -1, Y), base_block()))
self.commands.append(setblock(Position(X, -2, Y), repeater('-z')))
self.commands.append(setblock(Position(X, 0, Y), redstone_dust()))
self.commands.append(setblock(Position(X, -2, Y + 1), base_block()))
self.commands.append(setblock(Position(X, -1, Y + 1), redstone_dust()))
self.commands.append(setblock(Position(X + 1, -3, Y), base_block()))
self.commands.append(setblock(Position(X + 1, -2, Y), redstone_dust()))
def _build_module_7(self, x, y):
X = x * 2
Y = y * 2
self.commands.append(fill(Position(X, -3, Y), Position(X, -1, Y), base_block()))
self.commands.append(setblock(Position(X, -2, Y), repeater('-z')))
self.commands.append(setblock(Position(X, 0, Y), redstone_dust()))
self.commands.append(setblock(Position(X, -2, Y + 1), base_block()))
self.commands.append(setblock(Position(X, -1, Y + 1), redstone_dust()))
def _build_inputs(self):
self.commands.append(fill(Position(2, -2, 0), Position(2, -2, 1), base_block()))
self.commands.append(setblock(Position(2, -1, 0), repeater('+z')))
self.commands.append(setblock(Position(2, -1, 1), redstone_dust()))
availableLanes = 1
leftToBuild = len(self.modules[0]) - 1
x = 1
while leftToBuild > 0:
toCopy = min(availableLanes, leftToBuild)
depth = toCopy * 2 - 1
self.commands.append(clone(Position(2, -2, 0), Position(2 + depth, -1, 1), Position(x * 2 + 2, -2, 0)))
availableLanes += toCopy
leftToBuild -= toCopy
x += toCopy
def _place_repeaters(self):
# Input Lanes
for x in range(len(self.modules[0])):
X = x * 2 + 2
for y in range(7,len(self.modules),7):
module = self.modules[y][x]
if module == 4 or module == 5: y -= 1
module = self.modules[y][x]
Y = y * 2 + 2
if module == 0 or module == 1:
self.commands.append(setblock(Position(X, -1, Y), repeater('+z')))
elif module == 2:
self.commands.append(fill(Position(X, 0, Y), Position(X, -1, Y + 1), air()))
self.commands.append(setblock(Position(X, -1, Y + 1), redstone_dust()))
elif module == 3:
self.commands.append(setblock(Position(X, -1, Y - 1), repeater('+z')))
self.commands.append(setblock(Position(X, -1, Y), base_block()))
elif module == 6 or module == 7:
self.commands.append(setblock(Position(X, 0, Y), repeater('+z')))
self.commands.append(setblock(Position(X, 0, Y + 1), base_block()))
# Output Lanes
repeaterDir = '+x'
if self.right: repeaterDir = '-x'
for x in range(7, len(self.modules[0]), 7):
X = x * 2 + 2
for y in range(len(self.modules)):
Y = y * 2 + 2
module = self.modules[y][x]
if module == 1 or module == 6:
self.commands.append(setblock(Position(X, -3, Y + 1), repeater(repeaterDir)))
else:
self.commands.append(setblock(Position(X + 1, -3, Y + 1), repeater(repeaterDir)))
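# A hedged usage sketch of the Decoder above (assumes the structure_generator
# framework provides Structure's _rotate/_translate helpers used by
# generate_build). The truth-table file is made up for illustration:
# '1' -> torch module, '0' -> repeater module, 'x'/'X'/'-' -> blank.
if __name__ == '__main__':
    with open('table.txt', 'w') as f:
        f.write('1010\n0101\nxx11\n')

    decoder = Decoder()
    decoder.generate_build('table.txt', 'north', 'left', 'left', '0,0,0')
    for command in decoder.commands:
        print(command)  # the /fill, /clone and /setblock commands to run in-game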
"""
This module provides fittable models based on 2D images.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
import logging
import numpy as np
import copy
from astropy.modeling import Fittable2DModel
from astropy.modeling.parameters import Parameter
__all__ = ['FittableImageModel2D', 'NonNormalizable']
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler(level=logging.INFO))
class NonNormalizable(Warning):
"""
    Used to indicate that a :py:class:`FittableImageModel2D` model is
non-normalizable.
"""
pass
class FittableImageModel2D(Fittable2DModel):
"""
A fittable 2D model of an image allowing for image intensity scaling
and image translations.
This class takes 2D image data and computes the
values of the model at arbitrary locations (including at intra-pixel,
fractional positions) within this image using spline interpolation
provided by :py:class:`~scipy.interpolate.RectBivariateSpline`.
The fittable model provided by this class has three model parameters:
an image intensity scaling factor (`flux`) which is applied to
(normalized) image, and two positional parameters (`x_0` and `y_0`)
indicating the location of a feature in the coordinate grid on which
the model is to be evaluated.
If this class is initialized with `flux` (intensity scaling factor)
    set to `None`, then `flux` is estimated as ``|sum(data)|``.
Parameters
----------
data : numpy.ndarray
Array containing 2D image.
origin : tuple, None, optional
A reference point in the input image ``data`` array. When origin is
`None`, origin will be set at the middle of the image array.
If `origin` represents the location of a feature (e.g., the position
of an intensity peak) in the input ``data``, then model parameters
    `x_0` and `y_0` show the location of this peak in another target
image to which this model was fitted. Fundamentally, it is the
coordinate in the model's image data that should map to
coordinate (`x_0`, `y_0`) of the output coordinate system on which the
model is evaluated.
Alternatively, when `origin` is set to ``(0,0)``, then model parameters
`x_0` and `y_0` are shifts by which model's image should be translated
in order to match a target image.
normalize : bool, optional
        Indicates whether or not the model should be built on normalized
input image data. If true, then the normalization constant (*N*) is
computed so that
.. math::
        N \cdot C \cdot \left| \sum_{i,j} D_{i,j} \right| = 1,
where *N* is the normalization constant, *C* is correction factor
given by the parameter ``correction_factor``, and :math:`D_{i,j}` are
the elements of the input image ``data`` array.
correction_factor : float, optional
A strictly positive number that represents correction that needs to
be applied to model's `flux`. This parameter affects the value of
the normalization factor (see ``normalize`` for more details).
A possible application for this parameter is to account for aperture
correction. Assuming model's data represent a PSF to be fitted to
some target star, we set ``correction_factor`` to the aperture
correction that needs to be applied to the model.
Then, best fitted value of the `flux` model
parameter will represent an aperture-corrected flux of the target star.
fill_value : float, optional
The value to be returned by the `evaluate` or
``astropy.modeling.Model.__call__`` methods
when evaluation is performed outside the definition domain of the
model.
ikwargs : dict, optional
Additional optional keyword arguments to be passed directly to the
`compute_interpolator` method. See `compute_interpolator` for more
details.
"""
flux = Parameter(description='Intensity scaling factor for image data.',
default=None)
x_0 = Parameter(description='X-position of a feature in the image in '
'the output coordinate grid on which the model is '
'evaluated.', default=0.0)
y_0 = Parameter(description='Y-position of a feature in the image in '
'the output coordinate grid on which the model is '
'evaluated.', default=0.0)
def __init__(self, data, flux=flux.default,
x_0=x_0.default, y_0=y_0.default,
normalize=False, correction_factor=1.0,
origin=None, fill_value=0.0, ikwargs={}):
self._fill_value = fill_value
self._img_norm = None
self._normalization_status = 0 if normalize else 2
self._store_interpolator_kwargs(ikwargs)
if correction_factor <= 0:
raise ValueError("'correction_factor' must be strictly positive.")
self._correction_factor = correction_factor
self._data = np.array(data, copy=True, dtype=np.float64)
if not np.all(np.isfinite(self._data)):
raise ValueError("All elements of input 'data' must be finite.")
# set input image related parameters:
self._ny, self._nx = self._data.shape
self._shape = self._data.shape
if self._data.size < 1:
raise ValueError("Image data array cannot be zero-sized.")
# set the origin of the coordinate system in image's pixel grid:
self.origin = origin
if flux is None:
if self._img_norm is None:
self._img_norm = self._compute_raw_image_norm(self._data)
flux = self._img_norm
self._compute_normalization(normalize)
super(FittableImageModel2D, self).__init__(flux, x_0, y_0)
# initialize interpolator:
self.compute_interpolator(ikwargs)
def _compute_raw_image_norm(self, data):
"""
Helper function that computes the uncorrected inverse normalization
factor of input image data. This quantity is computed as the
*absolute value* of the *sum of all pixel values*.
.. note::
        This function is intended to be overridden in a subclass if one
desires to change the way the normalization factor is computed.
"""
return np.abs(np.sum(self._data, dtype=np.float64))
def _compute_normalization(self, normalize):
"""
Helper function that computes the inverse normalization factor of the
original image data. This quantity is computed as the *absolute value*
        of the sum of pixel values. Computation is performed only if this
sum has not been previously computed. Otherwise, the existing value is
not modified as :py:class:`FittableImageModel2D` does not allow image
data to be modified after the object is created.
.. note::
Normally, this function should not be called by the end-user. It
        is intended to be overridden in a subclass if one desires to change
the way the normalization factor is computed.
"""
self._normalization_constant = 1.0 / self._correction_factor
if normalize:
# compute normalization constant so that
# N*C*sum(data) = 1:
if self._img_norm is None:
self._img_norm = self._compute_raw_image_norm(self._data)
if self._img_norm != 0.0 and np.isfinite(self._img_norm):
self._normalization_constant /= self._img_norm
self._normalization_status = 0
else:
self._normalization_constant = 1.0
self._normalization_status = 1
warnings.warn("Overflow encountered while computing "
"normalization constant. Normalization "
"constant will be set to 1.", NonNormalizable)
else:
self._normalization_status = 2
@property
def data(self):
""" Get original image data. """
return self._data
@property
def normalized_data(self):
""" Get normalized and/or intensity-corrected image data. """
return (self._normalization_constant * self._data)
@property
def normalization_constant(self):
""" Get normalization constant. """
return self._normalization_constant
@property
def normalization_status(self):
"""
Get normalization status. Possible status values are:
        - 0: **Performed**. Model has been successfully normalized at
user's request.
- 1: **Failed**. Attempt to normalize has failed.
- 2: **NotRequested**. User did not request model to be normalized.
"""
return self._normalization_status
@property
def correction_factor(self):
"""
Set/Get flux correction factor.
.. note::
When setting correction factor, model's flux will be adjusted
accordingly such that if this model was a good fit to some target
image before, then it will remain a good fit after correction
factor change.
"""
return self._correction_factor
@correction_factor.setter
def correction_factor(self, correction_factor):
old_cf = self._correction_factor
self._correction_factor = correction_factor
self._compute_normalization(normalize=self._normalization_status != 2)
# adjust model's flux so that if this model was a good fit to some
# target image, then it will remain a good fit after correction factor
# change:
self.flux *= correction_factor / old_cf
@property
def shape(self):
"""A tuple of dimensions of the data array in numpy style (ny, nx)."""
return self._shape
@property
def nx(self):
"""Number of columns in the data array."""
return self._nx
@property
def ny(self):
"""Number of rows in the data array."""
return self._ny
@property
def origin(self):
"""
A tuple of ``x`` and ``y`` coordinates of the origin of the coordinate
system in terms of pixels of model's image.
When setting the coordinate system origin, a tuple of two `int` or
`float` may be used. If origin is set to `None`, the origin of the
coordinate system will be set to the middle of the data array
(``(npix-1)/2.0``).
.. warning::
Modifying `origin` will not adjust (modify) model's parameters
`x_0` and `y_0`.
"""
return (self._x_origin, self._y_origin)
@origin.setter
def origin(self, origin):
if origin is None:
self._x_origin = (self._nx - 1) / 2.0
self._y_origin = (self._ny - 1) / 2.0
elif hasattr(origin, '__iter__') and len(origin) == 2:
self._x_origin, self._y_origin = origin
else:
raise TypeError("Parameter 'origin' must be either None or an "
"iterable with two elements.")
@property
def x_origin(self):
"""X-coordinate of the origin of the coordinate system."""
return self._x_origin
@property
def y_origin(self):
"""Y-coordinate of the origin of the coordinate system."""
return self._y_origin
@property
def fill_value(self):
"""Fill value to be returned for coordinates outside of the domain of
definition of the interpolator. If ``fill_value`` is `None`, then
values outside of the domain of definition are the ones returned
by the interpolator.
"""
return self._fill_value
@fill_value.setter
def fill_value(self, fill_value):
self._fill_value = fill_value
def _store_interpolator_kwargs(self, ikwargs):
"""
This function should be called in a subclass whenever model's
interpolator is (re-)computed.
"""
self._interpolator_kwargs = copy.deepcopy(ikwargs)
@property
def interpolator_kwargs(self):
"""
Get current interpolator's arguments used when interpolator was
created.
"""
return self._interpolator_kwargs
def compute_interpolator(self, ikwargs={}):
"""
        Compute/define the interpolating spline. This function can be overridden
in a subclass to define custom interpolators.
Parameters
----------
ikwargs : dict, optional
Additional optional keyword arguments. Possible values are:
- **degree** : int, tuple, optional
Degree of the interpolating spline. A tuple can be used to
provide different degrees for the X- and Y-axes.
Default value is degree=3.
- **s** : float, optional
Non-negative smoothing factor. Default value s=0 corresponds to
interpolation.
See :py:class:`~scipy.interpolate.RectBivariateSpline` for more
details.
Notes
-----
* When subclassing :py:class:`FittableImageModel2D` for the
purpose of overriding :py:func:`compute_interpolator`,
          the :py:func:`evaluate` method may need to be overridden as well depending
on the behavior of the new interpolator. In addition, for
improved future compatibility, make sure
that the overriding method stores keyword arguments ``ikwargs``
by calling ``_store_interpolator_kwargs`` method.
* Use caution when modifying interpolator's degree or smoothness in
a computationally intensive part of the code as it may decrease
code performance due to the need to recompute interpolator.
"""
from scipy.interpolate import RectBivariateSpline
if 'degree' in ikwargs:
degree = ikwargs['degree']
if hasattr(degree, '__iter__') and len(degree) == 2:
degx = int(degree[0])
degy = int(degree[1])
else:
degx = int(degree)
degy = int(degree)
if degx < 0 or degy < 0:
raise ValueError("Interpolator degree must be a non-negative "
"integer")
else:
degx = 3
degy = 3
if 's' in ikwargs:
smoothness = ikwargs['s']
else:
smoothness = 0
        x = np.arange(self._nx, dtype=float)
        y = np.arange(self._ny, dtype=float)
        self.interpolator = RectBivariateSpline(
            x, y, self._data.T, kx=degx, ky=degy, s=smoothness
        )
self._store_interpolator_kwargs(ikwargs)
def evaluate(self, x, y, flux, x_0, y_0):
"""
Evaluate the model on some input variables and provided model
parameters.
"""
        xi = np.asarray(x, dtype=float) + (self._x_origin - x_0)
        yi = np.asarray(y, dtype=float) + (self._y_origin - y_0)
f = flux * self._normalization_constant
evaluated_model = f * self.interpolator.ev(xi, yi)
if self._fill_value is not None:
# find indices of pixels that are outside the input pixel grid and
# set these pixels to the 'fill_value':
invalid = (((xi < 0) | (xi > self._nx - 1)) |
((yi < 0) | (yi > self._ny - 1)))
evaluated_model[invalid] = self._fill_value
return evaluated_model
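# A hedged usage sketch of FittableImageModel2D, using only the API defined
# above; the Gaussian test image is made up for illustration.
if __name__ == '__main__':
    yy, xx = np.mgrid[0:25, 0:25]
    psf = np.exp(-((xx - 12.0) ** 2 + (yy - 12.0) ** 2) / (2.0 * 3.0 ** 2))

    # flux=None -> estimated as |sum(data)|; origin=None -> array center.
    model = FittableImageModel2D(psf, normalize=True)
    values = model(xx.astype(float), yy.astype(float))
    # With x_0 = y_0 = 0 the model image is shifted so that its origin maps to
    # (0, 0) of the output grid; pixels outside the data get fill_value.
    print(values.max())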
from .resource import *
from .manager import *
from .dist_manager import *
from itertools import product
import cv2
import pytest
from sklearn.decomposition import PCA
from sklearn.preprocessing import QuantileTransformer, StandardScaler, MinMaxScaler
from qudida import DomainAdapter
def params_combinations():
return product(
(QuantileTransformer(n_quantiles=255),
StandardScaler(),
MinMaxScaler(),
PCA(n_components=2),
),
((None, None),
(cv2.COLOR_BGR2YCrCb, cv2.COLOR_YCrCb2BGR),
(cv2.COLOR_BGR2HSV, cv2.COLOR_HSV2BGR),
),
)
@pytest.mark.parametrize('transformer,color_conversions',
params_combinations()
)
def test_transform(transformer, color_conversions):
adapter = DomainAdapter(transformer=transformer,
ref_img=cv2.imread('target.png'),
color_conversions=color_conversions,
)
source = cv2.imread('source.png')
res = adapter(source)
assert res.shape == source.shape
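# A hedged standalone sketch of the same flow the test above exercises: adapt
# the color distribution of source.png toward target.png. The file names are
# the same placeholders the test uses.
if __name__ == '__main__':
    adapter = DomainAdapter(transformer=MinMaxScaler(),
                            ref_img=cv2.imread('target.png'),
                            color_conversions=(None, None))
    adapted = adapter(cv2.imread('source.png'))
    cv2.imwrite('adapted.png', adapted)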
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pub2sd
----------------------------------
Tests for the `bibterm2dict` module.
Or at least they will be, once I figure out how to reference functions in BibTerm2Dict.py!
"""
import unittest
#import bibterm2dict
#from bibterm2dict import BibTerm2Dict
class TestPub2SD(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
# tout = bibterm2dict.BibTerm2Dict.hello_world()
# print(SCRIPT_DIR)
# assert(hello_world() == "Hello world!")
assert(True)
pass
#test write empty project file
#test read empty project file?
#test verify empty project file
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
from setuptools import setup
import os
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), "README.md")) as f:
readme = f.read()
setup(
name="json_tabularize",
version="1.0.3", # change this every time I release a new version
packages=[
"json_tabularize",
],
package_dir={"json_tabularize": 'src'},
package_data={
},
include_package_data=True,
install_requires=[
"genson",
],
extras_require={
},
description="Get deeply nested JSON into tabular format",
long_description=readme,
long_description_content_type="text/markdown",
license="MIT/X",
author="Mark Johnston Olson",
author_email="mjolsonsfca@gmail.com",
url="https://github.com/molsonkiko/json_tabularize",
# scripts=[ # maybe __main__ should be considered a script?
# ],
keywords=[
"json",
],
python_requires=">=3.6",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3 :: Only",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
],
)
import numpy as np
import pickle, os
class DataSho():
def __init__(self, pro):
self.pro = pro
def load(self, name):
with open(os.path.join(self.pro.path, name+'.pkl'), 'rb') as f:
return pickle.load(f)
def show_items(self):
return 'Dates', 'Timing', 'user', 'IP', 'state'
def show_appro(self):
chart = {'type': 'column'}
title = {'text': 'Analyzed log'}
date_dict = self.load('Dates')
xAxis = {'categories': list(date_dict.keys())}
yAxis = {'title': {'text': 'log10(Numbers)'}, 'type': 'logarithmic'}
success = []
failure = []
log = True
for i in date_dict.keys():
a = date_dict[i][:, 3]
#success.append(np.log10(np.sum(a == 'Success')+1))
#failure.append(np.log10(np.sum(a == 'Failure')+1))
success.append(np.sum(a == 'Success'))
failure.append(np.sum(a == 'Failure'))
series = {"Success": success, "Failure": failure}
#return series
return chart, title, xAxis, yAxis, series
#def show_det(self, return_value):
# return self.show_date(return_value)
def show_det(self, dates):
dating = self.load('Dates')[dates]
users = list(set(dating[:, 1]))
user_ips = []
for u in users:
rows = (dating[:, 1] == u)
ips = list(set(dating[rows, 2]))
user_ips.extend([(u, ip) for ip in ips])
success = []
failure = []
for user_ip in user_ips:
rows = (dating[:, 1] == user_ip[0])
a = dating[rows, :]
rows = (a[:, 2] == user_ip[1])
a = a[rows, :]
success.append(np.sum(a=='Success'))
failure.append(np.sum(a=='Failure'))
user_ips = np.array(user_ips)
users = list(user_ips[:, 0])
ips = list(user_ips[:, 1])
return ["user", "ip", "Success", "Failure"], users, ips, success, failure
def show_all(self):
user_ips = self.load('user')
users = []
ips = []
datings = []
success = []
failure = []
for user_ip, mat in user_ips.items():
dating = list(set(mat[:, 0]))
for d in dating:
users.append(user_ip[0])
ips.append(user_ip[1])
datings.append(d)
rows = (mat[:, 0] == d)
a = mat[rows, 2]
success.append(np.sum(a=='Success'))
                failure.append(np.sum(a == 'Failure'))
namelist = ["user", "ip", "Date", "Success", "Failure"]
return namelist, users, ips, datings, success, failure
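# A hedged sketch of driving DataSho: build a minimal 'Dates.pkl' of the shape
# the loader above expects (a dict mapping date -> matrix whose columns are
# [date, user, IP, state]) and render the chart config. `pro` only needs a
# `path` attribute; everything below is made-up sample data.
if __name__ == '__main__':
    import types
    pro = types.SimpleNamespace(path='.')
    dates = {'2024-01-01': np.array([['2024-01-01', 'alice', '1.2.3.4', 'Success'],
                                     ['2024-01-01', 'bob', '5.6.7.8', 'Failure']])}
    with open(os.path.join(pro.path, 'Dates.pkl'), 'wb') as f:
        pickle.dump(dates, f)
    chart, title, xAxis, yAxis, series = DataSho(pro).show_appro()
    print(series)  # e.g. {'Success': [1], 'Failure': [1]}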
import discord
import asyncio
from discord.ext import commands
import datetime
import random
import aiohttp
class Utility(commands.Cog):
def __init__(self, bot):
self.bot = bot
        self.client = bot
@commands.Cog.listener()
async def on_ready(self):
print("Cog: Utility, Is Ready!")
@commands.command(aliases=['whois', 'usrinfo', 'info', 'userstats', 'ui'])
async def userinfo(self, ctx, *, user: discord.Member = None):
if user is None:
user = ctx.author
date_format = "%a, %d %b %Y %I:%M %p"
embed = discord.Embed(description=user.mention,
color=discord.Color.random())
embed.set_author(name=str(user), icon_url=user.avatar_url)
embed.set_thumbnail(url=user.avatar_url)
embed.add_field(name="Joined",
value=user.joined_at.strftime(date_format))
members = sorted(ctx.guild.members, key=lambda m: m.joined_at)
embed.add_field(name="Join position",
value=str(members.index(user) + 1))
embed.add_field(name="Made account on",
value=user.created_at.strftime(date_format))
if len(user.roles) > 1:
role_string = ' '.join([r.mention for r in user.roles][1:])
embed.add_field(name="Roles [{}]".format(len(user.roles) - 1),
value=role_string,
inline=False)
embed.set_footer(text='ID: ' + str(user.id))
await ctx.send(embed=embed)
format = "%a, %d %b %Y | %H:%M:%S %ZGMT"
@commands.command(aliases=["remind", "remindme", "remind_me", "timer", "alarm", 'rm'])
async def reminder(self, ctx, time, *, reminder):
        embed = discord.Embed(color=0x0000f0, timestamp=datetime.datetime.utcnow())
embed.set_footer(text=f"{reminder}",
icon_url=f"{ctx.author.avatar_url}")
seconds = 0
if time.lower().endswith("d"):
seconds += int(time[:-1]) * 60 * 60 * 24
counter = f"{seconds // 60 // 60 // 24} day(s)"
if time.lower().endswith("h"):
seconds += int(time[:-1]) * 60 * 60
counter = f"{seconds // 60 // 60} hour(s)"
elif time.lower().endswith("m"):
seconds += int(time[:-1]) * 60
counter = f"{seconds // 60} minutes"
elif time.lower().endswith("s"):
seconds += int(time[:-1])
counter = f"{seconds} seconds"
if seconds == 0:
embed.add_field(
name='Warning',
value=
                'Please specify a proper duration, e.g. `30m`, `2h` or `1d`; the minimum is `5` minutes.'
)
elif seconds < 300:
embed.add_field(
name='Warning',
value=
'You have specified a too short duration!\nMinimum duration is 5 minutes.'
)
elif seconds > 7776000:
embed.add_field(
name='Warning',
value=
'You have specified a too long duration!\nMaximum duration is 90 days.'
)
else:
await ctx.send(
f"Alright, {ctx.author.mention}. I will remind you to `{reminder}` in `{counter}`."
)
await asyncio.sleep(seconds)
embed = discord.Embed(
title="Reminded!",
description=
f"Hey, {ctx.author.mention}. You asked me to remind you to: \n`{reminder}` \n`{counter}` ago.",
color=discord.Colour.random())
embed.set_footer(text=f"{ctx.author.name}",
icon_url=f"{ctx.author.avatar_url}")
await ctx.send(content=ctx.author.mention, embed=embed)
@commands.command(aliases=['avatar', 'av'])
async def aVatur(self, ctx, *, member: discord.Member = None):
if not member:
member = ctx.message.author
userAvatar = member.avatar_url
em = discord.Embed(title='User avatar:', color=discord.Colour.random())
em.set_image(url=userAvatar)
await ctx.send(embed=em)
    sniped_messages = {}

    @commands.Cog.listener()
    async def on_message_delete(self, message):
        if message.attachments:
            bob = message.attachments[0]
            self.sniped_messages[message.guild.id] = (bob.proxy_url,
                                                      message.content,
                                                      message.author,
                                                      message.channel.name,
                                                      message.created_at)
        else:
            self.sniped_messages[message.guild.id] = (message.content,
                                                      message.author,
                                                      message.channel.name,
                                                      message.created_at)
@commands.command(aliases=['gb', 'banlist'])
@commands.has_permissions(ban_members=True)
async def getbans(self, ctx):
x = await ctx.message.guild.bans()
x = '\n'.join([str(y.user) for y in x])
embed = discord.Embed(title="List of Banned Members",
description=x,
colour=0xFFFFF)
return await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Utility(bot))
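# A hedged sketch of loading this cog on a discord.py 1.x style bot (the
# synchronous setup() above matches that API); the token is a placeholder.
if __name__ == '__main__':
    bot = commands.Bot(command_prefix='!')
    setup(bot)
    # bot.run('YOUR_TOKEN')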
from __future__ import division
import numpy as np
import pandas as pd
from PyAstronomy import pyasl
import astropy.constants as c
import matplotlib.pyplot as plt
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.linewidth'] = 2
plt.rcParams['xtick.major.width'] = 2
plt.rcParams['ytick.major.width'] = 2
def get_data():
def get_stars():
d = pyasl.SWEETCat()
return d.data
def get_planets():
v = pyasl.ExoplanetEU2()
return v.getAllDataPandas()
df1 = get_stars()
df2 = get_planets()
df = pd.merge(df1, df2, left_on='star', right_on='star_name', how='right')
df['radius'] = df['radius'] * (c.R_jup.value/c.R_earth.value) # Earth radii
df = df[df['radius'] <= 3.5]
return df
if __name__ == '__main__':
df = get_data()
i1 = df['radius'] <= 2.0
i2 = df['metal'] >= 0.0
bins = np.logspace(-1.0, 0.55, 13)
plt.figure()
plt.hist(df['radius'].dropna(), bins=bins)
plt.vlines(2, 0, 300)
plt.xlabel('Planet radius [Earth radii]')
plt.xscale('log')
plt.figure()
plt.plot(df[i1]['metal'], df[i1]['radius'], '.', alpha=0.5)
plt.plot(df[~i1]['metal'], df[~i1]['radius'], '.', alpha=0.5)
plt.xlabel('[Fe/H]')
plt.ylabel('Planet radius [Earth radii]')
plt.figure()
plt.subplot(211)
plt.hist(df[i2]['radius'].dropna(), bins=bins)
plt.ylabel('Metal-rich')
plt.xscale('log')
plt.subplot(212)
plt.hist(df[~i2]['radius'].dropna(), bins=bins)
plt.ylabel('Metal-poor')
plt.xscale('log')
plt.show()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.AlisisReportRow import AlisisReportRow
class KoubeiMarketingDataAlisisReportQueryResponse(AlipayResponse):
def __init__(self):
super(KoubeiMarketingDataAlisisReportQueryResponse, self).__init__()
self._report_data = None
@property
def report_data(self):
return self._report_data
@report_data.setter
def report_data(self, value):
if isinstance(value, list):
self._report_data = list()
for i in value:
if isinstance(i, AlisisReportRow):
self._report_data.append(i)
else:
self._report_data.append(AlisisReportRow.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(KoubeiMarketingDataAlisisReportQueryResponse, self).parse_response_content(response_content)
if 'report_data' in response:
self.report_data = response['report_data']
import torch
import torch.nn as nn
import numpy as np
ic = 8
ih = 64
iw = 64
oc = 8
oh = 61
ow = 61
kk = 4
conv2d = nn.Conv2d(in_channels=ic, out_channels=oc, kernel_size=kk, padding=0, bias=False)
relu = nn.ReLU(inplace=False)
# randomize input feature map
ifm = torch.rand(1, ic, ih, iw)*255-128
#ifm = torch.ones(1, ic, ih, iw)
ifm = torch.round(ifm)
# randomize weight
weight = torch.rand(oc, ic, kk, kk)*255 - 128
# weight = torch.rand(oc, ic, kk, kk)*4
# weight = torch.ones(oc, ic, kk, kk)
# weight = torch.randint(1,4,(oc, ic, kk, kk))
weight = torch.round(weight)
# setting the kernel of conv2d as weight
conv2d.weight = nn.Parameter(weight)
# computing output feature
ofm = conv2d(ifm)
ofm_relu = relu(ofm)
ifm_np = ifm.data.numpy().astype(int)
weight_np = weight.data.numpy().astype(int)
ofm_np = ofm_relu.data.numpy().astype(int)
# write data as two's-complement binary strings
with open("ifm_bin_c%dxh%dxw%d.txt"%(ic, ih, iw), "w") as f:
for i in range(ic):
for j in range(ih):
for k in ifm_np[0, i, j, :]:
s = np.binary_repr(k, 8) + " "
f.write(s)
f.write("\n")
f.write("\n")
with open("ofm_bin_c%dxh%dxw%d.txt"%(oc, oh, ow), "w") as f:
for i in range(oc):
for j in range(oh):
for k in ofm_np[0, i, j, :]:
s = np.binary_repr(k, 25) + " "
f.write(s)
f.write("\n")
f.write("\n")
with open("weight_bin_co%dxci%dxk%dxk%d.txt"%(oc, ic, kk, kk), "w") as f:
for i in range(oc):
for j in range(ic):
for k in range(kk):
for l in weight_np[i, j, k, :]:
s = np.binary_repr(l, 8) + " "
f.write(s)
f.write("\n")
f.write("\n")
f.write("\n")
# write out data as decimal type
with open("ifm_dec_%dxh%dxw%d.txt" % (ic, ih, iw), "w") as f:
for i in range(ic):
for j in range(ih):
for k in ifm_np[0, i, j, :]:
s = str(k) + "\t "
f.write(s)
f.write("\n")
f.write("\n")
with open("ofm_dec_c%dxh%dxw%d.txt" % (oc, oh, ow), "w") as f:
for i in range(oc):
for j in range(oh):
for k in ofm_np[0, i, j, :]:
s = str(k) + ","
f.write(s)
f.write("\n")
f.write("\n")
with open("weight_dec_co%dxci%dxk%dxk%d.txt" % (oc, ic, kk, kk), "w") as f:
for i in range(oc):
for j in range(ic):
for k in range(kk):
for l in weight_np[i, j, k, :]:
s = str(l) + " "
f.write(s)
f.write("\n")
f.write("\n")
f.write("\n")
tile_length = 16
num_tile = 64//tile_length
with open("ifm.txt", "w") as f:
for ii in range(13):
for jj in range(num_tile):
for c in range(ic):
for j in range(tile_length + 3):
col = jj*tile_length + j
for i in range(8):
row = ii*5+i
# print(row, c, ii)
k = ifm_np[0, c, row, col] if ((row < 64) and (col < 64))else 0
s = np.binary_repr(k, 8) + " "
f.write(s)
f.write("\n")
f.write("\n")
f.write("\n")
f.write("\n")
with open("weight.txt", "w") as f:
for i in range(oc):
for ii in range(13):
for jj in range(num_tile):
for j in range(ic):
for k in range(kk):
for l in weight_np[i, j, :, k]:
s = np.binary_repr(l, 8) + " "
f.write(s)
f.write("\n")
f.write("\n")
f.write("\n")
f.write("\n")
f.write("\n")
with open("ifm_d_c%dxh%dxw%d.txt"%(ic, ih, iw), "w") as f:
for ii in range(13):
for jj in range(num_tile):
for c in range(ic):
for j in range(tile_length + 3):
col = jj*tile_length + j
for i in range(8):
row = ii*5+i
# print(row, c, ii)
k = ifm_np[0, c, row, col] if ((row < 64) and (col < 64)) else 0
s = str(k) + " "
f.write(s)
f.write("\n")
f.write("\n")
f.write("\n")
f.write("\n")
with open("weight_d_co%dxci%dxk%dxk%d.txt"%(oc, ic, kk, kk), "w") as f:
for i in range(oc):
for ii in range(13):
for jj in range(num_tile):
for j in range(ic):
for k in range(kk):
for l in weight_np[i, j, :, k]:
s = str(l) + " "
f.write(s)
f.write("\n")
f.write("\n")
f.write("\n")
f.write("\n")
f.write("\n")
"""
Lambdata - a collection of data science helper functions
"""
import lambdata_mpharm88.class_example
# sample code
""""""
from typing import List, Dict, Optional, Any
import json
from shimoku_api_python.exceptions import ApiClientError
class GetExplorerAPI(object):
def __init__(self, api_client):
self.api_client = api_client
def get_business(self, business_id: str, **kwargs) -> Dict:
"""Retrieve an specific user_id
:param business_id: user UUID
"""
endpoint: str = f'business/{business_id}'
business_data: Dict = (
self.api_client.query_element(
method='GET', endpoint=endpoint, **kwargs
)
)
return business_data
def get_app_type(self, app_type_id: str, **kwargs) -> Dict:
"""Retrieve an specific app_id metadata
:param app_type_id: app type UUID
"""
endpoint: str = f'apptype/{app_type_id}'
app_type_data: Dict = (
self.api_client.query_element(
method='GET', endpoint=endpoint, **kwargs
)
)
return app_type_data
def get_app(self, business_id: str, app_id: str, **kwargs) -> Dict:
"""Retrieve an specific app_id metadata
:param business_id: business UUID
:param app_id: app UUID
"""
endpoint: str = f'business/{business_id}/app/{app_id}'
app_data: Dict = (
self.api_client.query_element(
method='GET', endpoint=endpoint, **kwargs
)
)
return app_data
def _get_report_with_data(
self,
business_id: Optional[str] = None,
app_id: Optional[str] = None,
report_id: Optional[str] = None,
external_id: Optional[str] = None,
**kwargs,
) -> Dict:
"""Retrieve an specific report data
:param business_id: business UUID
:param app_id: Shimoku app UUID (only required if the external_id is provided)
:param report_id: Shimoku report UUID
:param external_id: external report UUID
"""
if report_id:
endpoint: str = f'business/{business_id}/app/{app_id}/report/{report_id}'
report_data: Dict = (
self.api_client.query_element(
method='GET',
endpoint=endpoint,
**kwargs
)
)
elif external_id:
if not app_id:
raise ValueError(
'If you retrieve by external_id '
'you must provide an app_id'
)
            report_ids_in_app: List[str] = (
                self.get_app_report_ids(
                    business_id=business_id, app_id=app_id,
                )
            )
            for report_id in report_ids_in_app:
                report_data_: Dict = self.get_report(
                    business_id=business_id, app_id=app_id,
                    report_id=report_id,
                )
if report_data_['etl_code_id'] == external_id:
endpoint: str = (
f'business/{business_id}/'
f'app/{app_id}/'
f'report/{report_id}'
)
report_data: Dict = (
self.api_client.query_element(
method='GET', endpoint=endpoint, **kwargs
)
)
return report_data
else:
return {}
else:
raise ValueError('Either report_id or external_id must be provided')
if report_data.get('chartData'):
report_data['chartData'] = json.loads(report_data['chartData'])
return report_data
def get_report(
self,
business_id: Optional[str] = None,
app_id: Optional[str] = None,
report_id: Optional[str] = None,
external_id: Optional[str] = None,
**kwargs,
) -> Dict:
"""Retrieve an specific report data
:param business_id: business UUID
:param app_id: Shimoku app UUID (only required if the external_id is provided)
:param report_id: Shimoku report UUID
:param external_id: external report UUID
"""
report_data: Dict = (
self._get_report_with_data(
business_id=business_id,
app_id=app_id,
report_id=report_id,
external_id=external_id,
)
)
# we do not return the chartData in the get_report()
# use _get_report_with_data() instead
if report_data.get('chartData'):
report_data.pop('chartData')
return report_data
def get_report_data(
self, business_id: str,
app_id: Optional[str] = None,
report_id: Optional[str] = None,
external_id: Optional[str] = None,
) -> List[Dict]:
""""""
report: Dict = self.get_report(
business_id=business_id,
app_id=app_id,
report_id=report_id,
)
if report['reportType']:
report: Dict = (
self._get_report_with_data(
business_id=business_id,
app_id=app_id,
report_id=report_id,
external_id=external_id,
)
)
report_data: List = report.get('chartData')
if report_data:
return report_data
else:
return list()
else:
endpoint: str = (
f'business/{business_id}/'
f'app/{app_id}/'
f'report/{report_id}/reportEntries'
)
            report_entries: Dict = (
                self.api_client.query_element(
                    method='GET', endpoint=endpoint,
                )
            )
            return report_entries['items']
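# A hedged usage sketch of GetExplorerAPI; `api_client` stands in for whatever
# client object exposes the query_element(method=..., endpoint=..., **kwargs)
# call used throughout this module, and all UUIDs below are placeholders.
#
#   explorer = GetExplorerAPI(api_client)
#   business = explorer.get_business(business_id='<business-uuid>')
#   app = explorer.get_app(business_id='<business-uuid>', app_id='<app-uuid>')
#   report = explorer.get_report(business_id='<business-uuid>',
#                                app_id='<app-uuid>', report_id='<report-uuid>')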
class CascadeExplorerAPI(GetExplorerAPI):
def __init__(self, api_client):
super().__init__(api_client)
def get_universe_businesses(self) -> List[Dict]:
endpoint: str = f'businesses'
return (
self.api_client.query_element(
endpoint=endpoint, method='GET',
)
)['items']
def find_business_by_name_filter(
self, name: Optional[str] = None,
) -> Dict:
""""""
businesses: List[Dict] = self.get_universe_businesses()
businesses: List[Dict] = [
business
for business in businesses
if business['name'] == name
]
if not businesses:
return {}
assert len(businesses) == 1
business: Dict = businesses[0]
return business
def get_universe_app_types(self) -> List[Dict]:
endpoint: str = f'apptypes'
return (
self.api_client.query_element(
endpoint=endpoint, method='GET',
)
)['items']
def find_app_type_by_name_filter(
self, name: Optional[str] = None,
normalized_name: Optional[str] = None,
) -> Dict:
""""""
app_types: List[Dict] = self.get_universe_app_types()
if name:
app_types: List[Dict] = [
app_type
for app_type in app_types
if app_type['name'] == name
]
elif normalized_name:
app_types: List[Dict] = [
app_type
for app_type in app_types
if app_type['normalizedName'] == normalized_name
]
if not app_types:
return {}
assert len(app_types) == 1
app_type: Dict = app_types[0]
return app_type
def get_business_apps(self, business_id: str) -> List[Dict]:
"""Given a business retrieve all app metadata
:param business_id: business UUID
"""
endpoint: str = f'business/{business_id}/apps'
apps_raw: Dict = (
self.api_client.query_element(
endpoint=endpoint, method='GET',
)
)
apps: List[Dict] = apps_raw.get('items')
if not apps:
return []
return apps
def get_business_app_ids(self, business_id: str) -> List[str]:
"""Given a business retrieve all app ids
:param business_id: business UUID
"""
apps: Optional[List[Dict]] = (
self.get_business_apps(
business_id=business_id,
)
)
return [app['id'] for app in apps]
def find_app_by_name_filter(
self, business_id: str, name: Optional[str] = None,
normalized_name: Optional[str] = None,
) -> Dict:
""""""
apps_list: List[Dict] = self.get_business_apps(business_id=business_id)
if name:
apps: List[Dict] = [
app
for app in apps_list
if app['name'] == name
]
elif normalized_name:
apps: List[Dict] = [
app
for app in apps_list
if app['normalizedName'] == normalized_name
]
if not apps:
return {}
assert len(apps) == 1
apps: Dict = apps[0]
return apps
def get_app_path_names(self, business_id: str, app_id: str) -> List[str]:
"""Given a Path that belongs to an AppId retrieve all reportId
:param business_id: business UUID
:param app_id: app UUID
"""
reports: List[Dict] = (
self.get_app_reports(
business_id=business_id,
app_id=app_id,
)
)
paths: List[str] = []
for report in reports:
path: Optional[str] = report.get('path')
if path:
paths = paths + [path]
return paths
def get_app_reports(self, business_id: str, app_id: str) -> List[Dict]:
"""Given an App Id retrieve all reports data from all reports
that belongs to such App Id.
"""
endpoint: str = f'business/{business_id}/app/{app_id}/reports'
reports_raw: Dict = (
self.api_client.query_element(
endpoint=endpoint, method='GET',
)
)
reports = reports_raw.get('items')
if not reports:
return []
return reports
def get_app_report_ids(self, business_id: str, app_id: str) -> List[str]:
"""Given an app retrieve all report_id
:param business_id: business UUID
:param app_id: app UUID
"""
reports: List[Dict] = (
self.get_app_reports(
business_id=business_id,
app_id=app_id,
)
)
return [report['id'] for report in reports]
# TODO pending
def get_report_all_report_entries(self, report_id: str) -> List[str]:
"""Given a report retrieve all reportEntries
:param report_id: app UUID
"""
raise NotImplementedError
def get_path_report_ids(
self, business_id: str, app_id: str, path_name: str,
) -> List[str]:
"""Given an App return all Reports ids that belong to a target path"""
reports: List[Dict] = self.get_app_reports(
business_id=business_id, app_id=app_id,
)
path_report_ids: List[str] = []
for report in reports:
path: Optional[str] = report.get('path')
if path == path_name:
report_id: str = report['id']
path_report_ids = path_report_ids + [report_id]
return path_report_ids
def get_path_reports(
self, business_id: str, app_id: str, path_name: str,
) -> List[Dict]:
"""Given an App return all Reports data that belong to a target path"""
reports: List[Dict] = self.get_app_reports(
business_id=business_id, app_id=app_id,
)
path_reports: List[Dict] = []
for report in reports:
path: Optional[str] = report.get('path')
if path == path_name:
path_reports = path_reports + [report]
return path_reports
    def get_business_apps_with_filter(
        self, business_id: str, app_filter: Dict
    ) -> List[Dict]:
        """Retrieve all apps of a business that match every
        key/value pair in `app_filter`.
        """
        all_apps: List[Dict] = (
            self.get_business_apps(
                business_id=business_id,
            )
        )
        apps: List[Dict] = []
        for app in all_apps:
            for filter_key, filter_value in app_filter.items():
                if app[filter_key] == filter_value:
                    apps.append(app)
        return apps
    def get_app_reports_by_filter(
        self, business_id: str, app_id: str,
        report_filter: Dict
    ) -> List[Dict]:
        """Having an AppId first retrieve all reports that belong
        to the target AppId. Second, keep only the reports that match
        every key/value pair in `report_filter`.
        # TODO filter example!!
        """
        all_reports: List[Dict] = (
            self.get_app_reports(
                business_id=business_id,
                app_id=app_id,
            )
        )
        reports: List[Dict] = []
        for report in all_reports:
            for filter_key, filter_value in report_filter.items():
                if report[filter_key] == filter_value:
                    reports.append(report)
        return reports
def get_app_by_type(
self, business_id: str, app_type_id: str,
) -> Dict:
"""
:param business_id: business UUID
:param app_type_id: appType UUID
"""
apps: List[Dict] = self.get_business_apps(business_id=business_id)
        # A single item (Dict) is expected, but a business
        # could contain several apps of the same type
        result: Any = {}
        for app in apps:
            if app['type']['id'] == app_type_id:
                if result:
                    result: List[Dict] = result + [app]
                else:
                    result: List[Dict] = [app]
if result:
assert len(result) == 1
return result[0]
else:
return {}
def get_app_by_name(self, business_id: str, name: str) -> Dict:
"""
:param business_id: business UUID
:param name: app or apptype name
"""
apps: List[Dict] = self.get_business_apps(business_id=business_id)
        # A single item (Dict) is expected, but a business
        # could have several apps with the same name
result: Any = {}
for app in apps:
# if App name does not match check the AppType,
# if it does not match the AppType Name then pass to the following App
if app.get('name'):
if not app['name'] == name:
continue
else:
if not app.get('type'):
continue
try:
app_type: Dict = self.get_app_type(
app_type_id=app['type']['id'],
)
except ApiClientError: # Business admin user
continue
if (
not app_type['normalizedName'] == name
and
not app_type['name'] == name
):
continue
            if result:
                result: List[Dict] = result + [app]
            else:
                result: List[Dict] = [app]
if result:
assert len(result) == 1
return result[0]
else:
return {}
class CreateExplorerAPI(object):
    _find_business_by_name_filter = CascadeExplorerAPI.find_business_by_name_filter
    _find_app_type_by_name_filter = CascadeExplorerAPI.find_app_type_by_name_filter
    _get_business_apps = CascadeExplorerAPI.get_business_apps
def __init__(self, api_client):
self.api_client = api_client
def _create_normalized_name(self, name: str) -> str:
"""Having a name create a normalizedName
Example
----------------------
# "name": "Test Borrar"
# "normalizedName": "test-borrar"
"""
return '-'.join(name.split(' ')).lower()
def _create_key_name(self, name: str) -> str:
"""Having a name create a key
Example
----------------------
# "name": "Test Borrar"
# "key": "TEST_BORRAR"
"""
return '_'.join(name.split(' ')).upper()
def create_business(self, name: str) -> Dict:
""""""
business: Dict = self._find_business_by_name_filter(name=name)
if business:
raise ValueError(f'A Business with the name {name} already exists')
endpoint: str = 'business'
item: Dict = {'name': name}
return self.api_client.query_element(
method='POST', endpoint=endpoint, **{'body_params': item},
)
def create_app_type(self, name: str) -> Dict:
""""""
app_type: Dict = self._find_app_type_by_name_filter(name=name)
if app_type:
raise ValueError(f'An AppType with the name {name} already exists')
endpoint: str = 'apptype'
# for instance:
# "name": "Test Borrar"
# "key": "TEST_BORRAR"
# "normalizedName": "test-borrar"
normalized_name: str = self._create_normalized_name(name)
key: str = self._create_key_name(name)
item: Dict = {
'name': name,
'key': key,
'normalizedName': normalized_name,
}
return self.api_client.query_element(
method='POST', endpoint=endpoint, **{'body_params': item},
)
def create_app(
self, business_id: str,
name: Optional[str],
app_type_id: Optional[str] = None,
app_metadata: Optional[Dict] = None,
) -> Dict:
"""
"""
endpoint: str = f'business/{business_id}/app'
item: Dict = {}
if app_type_id:
item['appTypeId'] = app_type_id
normalized_name: str = self._create_normalized_name(name)
item['name'] = name
item['normalizedName'] = normalized_name
if app_metadata:
hide_title: bool = app_metadata.get('hideTitle')
if hide_title:
item['hideTitle'] = 'true' if hide_title else 'false'
else:
item['hideTitle'] = 'true'
# These are the optional fields (previous were the mandatory ones)
allowed_columns: List[str] = [
'paymentType', 'trialDays',
'appSubscriptionInUserId',
]
# Check all kwargs keys are in the allowed_columns list
assert all([key in allowed_columns for key in app_metadata.keys()])
# Update items with kwargs
item.update(app_metadata)
else:
item['hideTitle'] = 'true'
return self.api_client.query_element(
method='POST', endpoint=endpoint, **{'body_params': item},
)
    def create_app_from_app_type_normalized_name(
        self, business_id: str, app_type_name: str,
    ) -> Dict:
        """Create the AppType and the App if required and return the App component
        """
        try:
            app_type: Dict = self.create_app_type(name=app_type_name)
        except ValueError:  # It already exists then
            app_type: Dict = (
                self._find_app_type_by_name_filter(name=app_type_name)
            )
        app_type_id: str = app_type['id']
        apps: List[Dict] = self._get_business_apps(business_id=business_id)
        target_apps = [app for app in apps if app['type']['id'] == app_type_id]
        if not target_apps:
            app: Dict = (
                self.create_app(
                    business_id=business_id,
                    name=app_type_name,
                    app_type_id=app_type_id,
                )
            )
        else:
            app: Dict = target_apps[0]
        return app
def create_report(
self, business_id: str, app_id: str, report_metadata: Dict,
real_time: bool = False,
) -> Dict:
"""Create new Report associated to an AppId
:param business_id:
:param app_id:
:param report_metadata: A dict with all the values required to create a report
"""
def append_fields(item: Dict, field_name: str) -> Dict:
"""Equivalent to
grid: Optional[str] = report_metadata.get('grid')
if grid:
item['grid'] = grid
"""
field_value: Optional[str] = report_metadata.get(field_name)
if field_value is not None:
item[field_name] = field_value
return item
endpoint: str = f'business/{business_id}/app/{app_id}/report'
# These are the mandatory fields
title: str = report_metadata['title']
# These are the mandatory fields
item: Dict = {
'appId': app_id,
'title': title,
}
item: Dict = append_fields(item=item, field_name='path')
item: Dict = append_fields(item=item, field_name='grid')
item: Dict = append_fields(item=item, field_name='reportType')
item: Dict = append_fields(item=item, field_name='order')
item: Dict = append_fields(item=item, field_name='sizeColumns')
item: Dict = append_fields(item=item, field_name='sizeRows')
item: Dict = append_fields(item=item, field_name='padding')
if real_time:
item['subscribe'] = True
# Update items with kwargs
item.update(report_metadata)
# Optional values
report_type: str = report_metadata.get('reportType')
if report_type:
if report_type != 'Table': # Tables have reportType as None
item['reportType'] = report_type
elif report_metadata.get('smartFilters'):
# Smart filters only exists for Tables
item['smartFilters'] = report_metadata['smartFilters']
report: Dict = (
self.api_client.query_element(
method='POST', endpoint=endpoint,
**{'body_params': item},
)
)
return {
k: v
for k, v in report.items()
if k not in ['chartData', 'owner', 'chartDataItem'] # we do not return the data
}
def _create_report_entries(
self, business_id: str, app_id: str, report_id: str,
items: List[Dict],
) -> List[Dict]:
"""Create new reportEntry associated to a Report
:param business_id:
:param app_id:
:param report_id:
:param report_entry_metadata: A dict with all the values required to create a reportEntry
"""
endpoint: str = (
f'business/{business_id}/'
f'app/{app_id}/'
f'report/{report_id}/'
f'reportEntry'
)
report_entries: List[Dict] = []
for item in items:
report_entry: Dict = (
self.api_client.query_element(
method='POST', endpoint=endpoint,
**{'body_params': item},
)
)
report_entries = report_entries + [report_entry]
return report_entries
class UpdateExplorerAPI(CascadeExplorerAPI):
_find_business_by_name_filter = CascadeExplorerAPI.find_business_by_name_filter
_find_app_type_by_name_filter = CascadeExplorerAPI.find_app_type_by_name_filter
def __init__(self, api_client):
self.api_client = api_client
def update_business(self, business_id: str, business_data: Dict) -> Dict:
""""""
name = business_data.get('name')
if name:
business: Dict = self._find_business_by_name_filter(name=name)
if business:
raise ValueError(
f'Cannot Update | '
f'A Business with the name {name} already exists'
)
endpoint: str = f'business/{business_id}'
return self.api_client.query_element(
method='PATCH', endpoint=endpoint, **{'body_params': business_data},
)
def update_app_type(self, app_type_id: str, app_type_metadata: Dict) -> Dict:
""""""
name = app_type_metadata.get('name')
if name:
_app_type: Dict = self._find_app_type_by_name_filter(name=name)
if _app_type:
raise ValueError(
f'Cannot Update | '
f'A AppType with the name {name} already exists'
)
endpoint: str = f'apptype/{app_type_id}'
return self.api_client.query_element(
method='PATCH', endpoint=endpoint, **{'body_params': app_type_metadata},
)
def update_app(self, business_id: str, app_id: str, app_metadata: Dict) -> Dict:
"""
:param business_id:
:param app_id:
:param app_metadata: contains the elements to update; each key
is the column name and each value the value to overwrite
"""
endpoint: str = f'business/{business_id}/app/{app_id}'
return self.api_client.query_element(
method='PATCH', endpoint=endpoint,
**{'body_params': app_metadata},
)
def update_report(
self, business_id: str, app_id: str, report_id: str,
report_metadata: Dict,
) -> Dict:
""""""
endpoint: str = f'business/{business_id}/app/{app_id}/report/{report_id}'
return self.api_client.query_element(
method='PATCH', endpoint=endpoint,
**{'body_params': report_metadata},
)
class MultiCascadeExplorerAPI(CascadeExplorerAPI):
def __init__(self, api_client):
super().__init__(api_client)
# TODO paginate
def get_business_paths(self, business_id: str) -> List[str]:
"""Given a business retrieve all path names
:param business_id: business UUID
"""
app_ids: List[str] = self.get_business_apps(business_id=business_id)
paths: List[str] = []
for app_id in app_ids:
app_paths: List[str] = self.get_app_paths(app_id=app_id)
paths.extend(app_paths)
return paths
# TODO paginate
def get_business_reports(self, business_id: str) -> List[str]:
"""Given a business retrieve all report ids
:param business_id: business UUID
"""
app_ids: List[str] = self.get_business_apps(business_id=business_id)
report_ids: List[str] = []
for app_id in app_ids:
app_report_ids: List[str] = self.get_app_reports(app_id=app_id)
report_ids.extend(app_report_ids)
return report_ids
# TODO paginate
def get_business_id_by_report(self, report_id: str, **kwargs) -> str:
"""Bottom-up method
Having a report_id return the app it belongs to
"""
app_id: str = self.get_app_id_by_report(report_id=report_id, **kwargs)
business_id: str = self.get_business_id_by_app(app_id=app_id, **kwargs)
return business_id
class DeleteExplorerApi(MultiCascadeExplorerAPI, UpdateExplorerAPI):
"""Get Businesses, Apps, Paths and Reports in any possible combination
"""
def __init__(self, api_client):
super().__init__(api_client)
def delete_business(self, business_id: str):
"""Delete a Business.
All apps, reports and data associated with that business are removed by the API
"""
endpoint: str = f'business/{business_id}'
self.api_client.query_element(
method='DELETE', endpoint=endpoint,
)
def delete_app_type(self, app_type_id: str):
"""Delete an appType"""
endpoint: str = f'apptype/{app_type_id}'
self.api_client.query_element(
method='DELETE', endpoint=endpoint,
)
def delete_app(self, business_id: str, app_id: str) -> Dict:
"""Delete an App
All reports and data associated with that app are removed by the API
"""
endpoint: str = f'business/{business_id}/app/{app_id}'
result: Dict = self.api_client.query_element(
method='DELETE', endpoint=endpoint
)
return result
def delete_path(self, business_id: str, app_id: str, path_name: str):
"""Delete all Reports in a path
All data associated with that report is removed by the API"""
report_ids: List[str] = (
self.get_path_reports(
business_id=business_id,
app_id=app_id,
path_name=path_name,
)
)
for report_id in report_ids:
self.delete_report_and_entries(report_id)
def delete_report(
self, business_id: str, app_id: str, report_id: str,
relocating: bool = True, delete_data: bool = True,
) -> Dict:
"""Delete a Report, relocating reports underneath to avoid errors
"""
reports: List[Dict] = (
self.get_app_reports(
business_id=business_id,
app_id=app_id
)
)
target_report: Dict = self.get_report(
business_id=business_id,
app_id=app_id,
report_id=report_id,
)
target_report_grid: str = target_report.get('grid')
# TO BE deprecated with row, column and grid!
# TODO this looks like a different method
if relocating and target_report_grid:
    target_report_row: int = int(target_report_grid.split(',')[0])
    for report in reports:
        report_grid: str = report.get('grid')
        if not report_grid:
            continue
        report_row: int = int(report_grid.split(',')[0])
        if report_row > target_report_row:
            # shift the report one row up and persist its new grid
            report_row -= 1
            report_column: int = int(report_grid.split(',')[1])
            grid: str = f'{report_row}, {report_column}'
            self.update_report(
                business_id=business_id,
                app_id=app_id,
                report_id=report['id'],  # the relocated report, not the one being deleted
                report_metadata={'grid': grid},
            )
endpoint: str = f'business/{business_id}/app/{app_id}/report/{report_id}'
result: Dict = self.api_client.query_element(
method='DELETE', endpoint=endpoint
)
return result
def delete_report_entries(
self, business_id: str, app_id: str, report_id: str,
) -> None:
"""Delete a Report, relocating reports underneath to avoid errors
"""
report_entries: List[Dict] = (
self.get_report_data(
business_id=business_id,
app_id=app_id,
report_id=report_id,
)
)
for report_entry in report_entries:
report_entry_id: str = report_entry['id']
endpoint: str = (
f'business/{business_id}/'
f'app/{app_id}/'
f'report/{report_id}/'
f'reportEntry/{report_entry_id}'
)
self.api_client.query_element(
    method='DELETE', endpoint=endpoint,
)
class MultiDeleteApi:
"""Get Businesses, Apps, Paths and Reports in any possible combination
"""
_get_business = GetExplorerAPI.get_business
_get_app_type = GetExplorerAPI.get_app_type
_get_app = GetExplorerAPI.get_app
_delete_business = DeleteExplorerApi.delete_business
_delete_app = DeleteExplorerApi.delete_app
_delete_app_type = DeleteExplorerApi.delete_app_type
_delete_report = DeleteExplorerApi.delete_report
def __init__(self):
pass
def _delete_business_and_app_type(
self, business_id: str, app_type_id: str
):
try:
self._delete_business(business_id)
except Exception as e_bd:
raise ValueError(
f'{e_bd} | Neither Business nor AppType was deleted | '
f'business_id: {business_id} | '
f'app_type_id: {app_type_id}'
)
try:
_ = self._get_business(business_id)
except ApiClientError:
pass
except Exception as e_gb:
raise ValueError(
f'{e_gb} | Neither Business nor AppType was deleted | '
f'business_id: {business_id} | app_type_id: {app_type_id}'
)
try:
self._delete_app_type(app_type_id)
except ApiClientError:
return {}
except Exception as e_atd:
raise ValueError(
f'{e_atd} | AppType was not deleted | '
f'app_type_id: {app_type_id}'
)
try:
_ = self._get_app_type(app_type_id)
except ApiClientError:
return {}
except Exception as e_atg:
raise ValueError(
f'{e_atg} | AppType was not deleted | '
f'app_type_id: {app_type_id}'
)
def _delete_business_and_app(
self, business_id: str, app_id: str,
):
try:
self._delete_business(business_id)
except Exception as e_bd:
raise ValueError(
f'{e_bd} | Neither Business nor App was deleted | '
f'business_id: {business_id} | '
f'app_id: {app_id}'
)
try:
_ = self._get_business(business_id)
except ApiClientError:
pass
except Exception as e_gb:
raise ValueError(
f'{e_gb} | Neither Business nor App was deleted | '
f'business_id: {business_id} | '
f'app_id: {app_id}'
)
try:
self._delete_app(app_id)
except ApiClientError:
return {}
except Exception as e_atd:
raise ValueError(
f'{e_atd} | App was not deleted | '
f'app_id: {app_id}'
)
try:
_ = self._get_app(app_id)
except ApiClientError:
return {}
except Exception as e_atg:
raise ValueError(
f'{e_atg} | App was not deleted | '
f'app_id: {app_id}'
)
class MultiCreateApi(MultiDeleteApi):
"""If some upper level elements are not created it does it
"""
_get_universe_app_types = CascadeExplorerAPI.get_universe_app_types
_get_app_by_type = CascadeExplorerAPI.get_app_by_type
_find_app_type_by_name_filter = CascadeExplorerAPI.find_app_type_by_name_filter
_create_business = CreateExplorerAPI.create_business
_create_app_type = CreateExplorerAPI.create_app_type
_create_app = CreateExplorerAPI.create_app
_create_report = CreateExplorerAPI.create_report
def __init__(self):
super().__init__()
def create_business_and_app(
self, app_type_id: str, business_name: str, app_metadata: Dict,
) -> Dict[str, Dict]:
"""Create new Report associated to an AppId
:param app_type_id:
:param business_name:
:param app_metadata:
"""
business: Dict = self._create_business(name=business_name)
business_id: str = business['id']
try:
app: Dict = (
self._create_app(
business_id=business_id,
app_type_id=app_type_id,
app_metadata=app_metadata,
)
)
except Exception as e:
self._delete_business(business_id=business_id)
try:
_ = self._get_business(business_id)
raise ValueError(
f'{e} | The App was not created, but a new Business was; '
f'it could not be deleted automatically and should probably '
f'be removed manually (business_id: {business_id})'
)
except ApiClientError:
return {}
return {
'business': business,
'app': app,
}
def create_app_type_and_app(
self, business_id: str,
app_type_metadata: Dict,
app_metadata: Optional[Dict] = None,
) -> Dict[str, Dict]:
"""
Create the AppType if it does not exist yet, then create (or fetch) the App of that type
"""
try:
app_type: Dict = self._create_app_type(**app_type_metadata)
except ValueError:
app_type_name: str = app_type_metadata['name']
app_type: Dict = self._find_app_type_by_name_filter(name=app_type_name)
app_type_id: str = app_type['id']
if app_metadata is None:
    app_metadata = {}
app_metadata['app_type_id'] = app_type_id
app_metadata['business_id'] = business_id
app: Dict = self._get_app_by_type(
business_id=business_id,
app_type_id=app_type_id,
)
if not app:
if app_metadata.get('name'):
app: Dict = self._create_app(**app_metadata)
else: # get the AppType name and use it
app_metadata.update({'name': app_type_metadata['name']})
app: Dict = self._create_app(**app_metadata)
return {
'app_type': app_type,
'app': app
}
def create_app_and_report(
self, business_id: str, app_type_id: str,
app_metadata: Dict, report_metadata: Dict,
) -> Dict:
"""Create new Report associated to an AppId
:param business_id:
:param app_type_id:
:param app_metadata:
:param report_metadata: A dict with all the values required to create a report
"""
app: Dict = (
self._create_app(
business_id=business_id,
app_type_id=app_type_id,
app_metadata=app_metadata,
)
)
app_id: str = app['id']
try:
report: Dict = (
self._create_report(
business_id=business_id,
app_id=app_id,
report_metadata=report_metadata,
)
)
except Exception as e:
raise ValueError(f'{e} | app_id created: {app_id} | Better delete it')
return report
def create_business_app_and_app_type(
self, business_name: str,
app_metadata: Dict,
app_type_metadata: Dict,
) -> Dict[str, Dict]:
"""
"""
app_type: Dict = self._create_app_type(**app_type_metadata)
app_type_id: str = app_type['id']
app_metadata['app_type_id'] = app_type_id
business: Dict = {}
try:
    business = self._create_business(business_name)
    business_id: str = business['id']
    app_metadata['business_id'] = business_id
except Exception as e_bc:
    try:
        self._delete_app_type(app_type_id=app_type_id)
    except ApiClientError:
        return {}
    except Exception as e_atd:
        raise ValueError(
            f'{e_bc} | {e_atd} | Business was not created | '
            f'AppType was created with app_type_id = {app_type_id} '
            f'and could not be deleted automatically'
        )
    return {}
app: Dict = {}
try:
app: Dict = self._create_app(**app_metadata)
except Exception as e:
try:
self._delete_business_and_app_type(
business_id=business_id,
app_type_id=app_type_id,
)
except ApiClientError:
return {}
except Exception as e:
raise ValueError(f'App was not created | {e}')
return {
'business': business,
'app_type': app_type,
'app': app
}
def create_business_app_and_report(
self, app_type_id: str,
business_name: str,
app_metadata: Dict,
report_metadata: Dict,
) -> Dict[str, Dict]:
"""
"""
business: Dict = self._create_business(name=business_name)
business_id: str = business['id']
app_metadata['business_id'] = business_id
app_metadata['app_type_id'] = app_type_id
try:
app: Dict = self._create_app(
    business_id=business_id,
    app_type_id=app_type_id,
    app_metadata=app_metadata,
)
app_id = app['id']
except Exception as e:
try:
self._delete_business(business_id)
except ApiClientError:
return {}
except Exception as e:
raise ValueError(
f'{e} | Business with business_id {business_id} '
f'created that probably wants to be removed | '
f'App was not created | '
f'Report was not created'
)
try:
report: Dict = self._create_report(
business_id=business_id,
app_id=app_id,
report_metadata=report_metadata,
)
except Exception as e:
try:
self._delete_business_and_app(
business_id=business_id,
app_id=app_id,
)
except ApiClientError:
return {}
except Exception as e_dba:
raise ValueError(
f'{e} | {e_dba} | Report was not created'
)
return {}
return {
'business': business,
'app': app,
'report': report,
}
def create_business_app_type_app_and_report(
self, business_name: str,
app_type_metadata: Dict,
app_metadata: Dict,
report_metadata: Dict,
) -> Dict[str, Dict]:
"""
"""
d = self.create_business_app_and_app_type(
business_name=business_name,
app_type_metadata=app_type_metadata,
app_metadata=app_metadata,
)
business_id: str = d['business']['id']
app_id: str = d['app']['id']
try:
report: Dict = self._create_report(
business_id=business_id,
app_id=app_id,
report_metadata=report_metadata,
)
except Exception as e:
try:
self._delete_business_and_app(
business_id=business_id,
app_id=app_id,
)
except ApiClientError:
return {}
except Exception as e_:
raise ValueError(
f'{e} | {e_} | Report was not created'
)
app_type_id: str = d['app_type']['id']
try:
    self._delete_app_type(app_type_id)
except Exception as e_:
    raise ValueError(
        f'{e_} | Report was not created | '
        f'AppType was created with app_type_id: {app_type_id} '
        f'and could not be deleted automatically'
    )
return {}
return {
'app_type': d['app_type'],
'business': d['business'],
'app': d['app'],
'report': report,
}
class UniverseExplorerApi:
""""""
get_universe_businesses = CascadeExplorerAPI.get_universe_businesses
get_universe_app_types = CascadeExplorerAPI.get_universe_app_types
class BusinessExplorerApi:
""""""
get_business = GetExplorerAPI.get_business
get_universe_businesses = CascadeExplorerAPI.get_universe_businesses
_find_business_by_name_filter = CascadeExplorerAPI.find_business_by_name_filter
create_business = CreateExplorerAPI.create_business
update_business = UpdateExplorerAPI.update_business
get_business_apps = CascadeExplorerAPI.get_business_apps
get_business_app_ids = CascadeExplorerAPI.get_business_app_ids
get_business_all_apps_with_filter = CascadeExplorerAPI.get_business_apps_with_filter
delete_business = DeleteExplorerApi.delete_business
class AppTypeExplorerApi:
""""""
_create_normalized_name = CreateExplorerAPI._create_normalized_name
_create_key_name = CreateExplorerAPI._create_key_name
get_app_type = GetExplorerAPI.get_app_type
get_universe_app_types = CascadeExplorerAPI.get_universe_app_types
_find_app_type_by_name_filter = CascadeExplorerAPI.find_app_type_by_name_filter
create_app_type = CreateExplorerAPI.create_app_type
update_app_type = UpdateExplorerAPI.update_app_type
delete_app_type = DeleteExplorerApi.delete_app_type
class AppExplorerApi:
_create_normalized_name = CreateExplorerAPI._create_normalized_name
_create_key_name = CreateExplorerAPI._create_key_name
get_app = GetExplorerAPI.get_app
create_app = CreateExplorerAPI.create_app
update_app = UpdateExplorerAPI.update_app
_get_business_apps = CascadeExplorerAPI.get_business_apps
get_business_apps = CascadeExplorerAPI.get_business_apps
find_app_by_name_filter = CascadeExplorerAPI.find_app_by_name_filter
get_app_reports = CascadeExplorerAPI.get_app_reports
get_app_report_ids = CascadeExplorerAPI.get_app_report_ids
get_app_path_names = CascadeExplorerAPI.get_app_path_names
get_app_reports_by_filter = MultiCascadeExplorerAPI.get_app_reports_by_filter
get_app_by_type = CascadeExplorerAPI.get_app_by_type
get_app_type = CascadeExplorerAPI.get_app_type
get_app_by_name = CascadeExplorerAPI.get_app_by_name
delete_app = DeleteExplorerApi.delete_app
class PathExplorerApi:
_get_report = GetExplorerAPI.get_report
_update_report = UpdateExplorerAPI.update_report
_get_app_reports = CascadeExplorerAPI.get_app_reports
_get_app_path_names = CascadeExplorerAPI.get_app_path_names
get_path_reports = MultiCascadeExplorerAPI.get_path_reports
get_path_report_ids = MultiCascadeExplorerAPI.get_path_report_ids
class ReportExplorerApi:
get_report = GetExplorerAPI.get_report
get_report_data = GetExplorerAPI.get_report_data
_get_report_with_data = GetExplorerAPI._get_report_with_data
_get_app_reports = CascadeExplorerAPI.get_app_reports
create_report = CreateExplorerAPI.create_report
create_app_and_report = MultiCreateApi.create_app_and_report
update_report = UpdateExplorerAPI.update_report
get_business_id_by_report = MultiCascadeExplorerAPI.get_business_id_by_report
delete_report = DeleteExplorerApi.delete_report
class ExplorerApi(
CreateExplorerAPI,
DeleteExplorerApi,
):
"""Get Businesses, Apps, Paths and Reports in any possible combination
"""
def __init__(self, api_client):
super().__init__(api_client)
# TODO WiP
def has_app_report_data(self, business_id: str, app_id: str) -> bool:
""""""
report_ids: List[str] = self.get_app_report_ids(
business_id=business_id, app_id=app_id
)
for report_id in report_ids:
result: bool = self.has_report_report_entries(report_id)
if result:
return True
return False
# TODO WiP
def has_path_data(self, business_id: str, app_id: str, path_name: str) -> bool:
""""""
report_ids: List[str] = self.get_app_report_ids(
business_id=business_id, app_id=app_id
)
for report_id in report_ids:
result: bool = self.has_report_report_entries(report_id)
if result:
return True
return False
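# A minimal usage sketch (assuming an `api_client` that exposes the
# query_element(method, endpoint, **params) interface used throughout):
#
#   explorer = ExplorerApi(api_client)
#   business = explorer.create_business(name='Acme')
#   apps = explorer.get_business_apps(business_id=business['id'])
#   explorer.delete_business(business_id=business['id'])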
|
nilq/baby-python
|
python
|
import datetime
from django.http import HttpResponse
from django.urls import Resolver404
from blog.auth import authorize
from blog.models import Article
def dispatch(request, *args, **kwargs):
if request.method == 'GET':
return index(request, *args, **kwargs)
elif request.method == "POST":
return create(request, *args, **kwargs)
else:
raise Resolver404
def item_dispatch(request, *args, **kwargs):
if request.method == 'GET':
return show(request, *args, **kwargs)
elif request.method == "PUT":
return update(request, *args, **kwargs)
elif request.method == "DELETE":
return delete(request, *args, **kwargs)
else:
raise Resolver404
def index(request):
authorize(request.user, 'read', Article)
now = datetime.datetime.now()
html = "<html><body>It is now %s.</body></html>" % now
return HttpResponse(html)
def create(request):
authorize(request.user, 'create', Article)
now = datetime.datetime.now()
html = "<html><body>It is now %s.</body></html>" % now
return HttpResponse(html)
def show(request, article_id):
article = Article.objects.get(pk=article_id)
authorize(request.user, 'read', article)
now = datetime.datetime.now()
html = "<html><body>It is now %s.</body></html>" % now
return HttpResponse(html)
def update(request, article_id):
article = Article.objects.get(pk=article_id)
authorize(request.user, 'update', article)
now = datetime.datetime.now()
html = "<html><body>It is now %s.</body></html>" % now
return HttpResponse(html)
def delete(request, article_id):
article = Article.objects.get(pk=article_id)
authorize(request.user, 'delete', article)
now = datetime.datetime.now()
html = "<html><body>It is now %s.</body></html>" % now
return HttpResponse(html)
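# A minimal urls.py sketch wiring these dispatchers (URL patterns are
# hypothetical; adjust to the project's routing):
#
#   from django.urls import path
#   from blog import views
#
#   urlpatterns = [
#       path('articles/', views.dispatch),
#       path('articles/<int:article_id>/', views.item_dispatch),
#   ]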
|
nilq/baby-python
|
python
|
"""Tests the probflow.models module when backend = tensorflow"""
import pytest
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
from probflow.core.settings import Sampling
import probflow.core.ops as O
from probflow.distributions import Normal
from probflow.parameters import *
from probflow.modules import *
from probflow.models import *
from probflow.data import DataGenerator
def is_close(a, b, tol=1e-3):
return np.abs(a-b) < tol
def test_Model_0D():
"""Tests the probflow.models.Model abstract base class"""
class MyModel(Model):
def __init__(self):
self.weight = Parameter(name='Weight')
self.bias = Parameter(name='Bias')
self.std = ScaleParameter(name='Std')
def __call__(self, x):
return Normal(x*self.weight() + self.bias(), self.std())
# Instantiate the model
my_model = MyModel()
# Shouldn't be training
assert my_model._is_training is False
# Fit the model
x = np.random.randn(100).astype('float32')
y = -x + 1
my_model.fit(x, y, batch_size=5, epochs=10)
# Shouldn't be training
assert my_model._is_training is False
# Should be able to set learning rate
lr = my_model._learning_rate
my_model.set_learning_rate(lr+1.0)
assert lr != my_model._learning_rate
# predictive samples
samples = my_model.predictive_sample(x[:30], n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 50
assert samples.shape[1] == 30
# aleatoric samples
samples = my_model.aleatoric_sample(x[:30], n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 50
assert samples.shape[1] == 30
# epistemic samples
samples = my_model.epistemic_sample(x[:30], n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 50
assert samples.shape[1] == 30
# predict
samples = my_model.predict(x[:30])
assert isinstance(samples, np.ndarray)
assert samples.ndim == 1
assert samples.shape[0] == 30
# metric
metric = my_model.metric('mae', x[:30], y[:30])
assert isinstance(metric, np.floating)
metric = my_model.metric('mse', x[:30], y[:30])
assert isinstance(metric, np.floating)
assert metric >= 0
# posterior_mean w/ no args should return all params
val = my_model.posterior_mean()
assert isinstance(val, dict)
assert len(val) == 3
assert 'Weight' in val
assert 'Bias' in val
assert 'Std' in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 1 for v in val)
# posterior_mean w/ str should return value of that param
val = my_model.posterior_mean('Weight')
assert isinstance(val, np.ndarray)
assert val.ndim == 1
# posterior_mean w/ list of params should return only those params
val = my_model.posterior_mean(['Weight', 'Std'])
assert isinstance(val, dict)
assert len(val) == 2
assert 'Weight' in val
assert 'Bias' not in val
assert 'Std' in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 1 for v in val)
# posterior_sample w/ no args should return all params
val = my_model.posterior_sample(n=20)
assert isinstance(val, dict)
assert len(val) == 3
assert 'Weight' in val
assert 'Bias' in val
assert 'Std' in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 2 for v in val)
assert all(val[v].shape[0] == 20 for v in val)
assert all(val[v].shape[1] == 1 for v in val)
# posterior_sample w/ str should return sample of that param
val = my_model.posterior_sample('Weight', n=20)
assert isinstance(val, np.ndarray)
assert val.ndim == 2
assert val.shape[0] == 20
assert val.shape[1] == 1
# posterior_sample w/ list of params should return only those params
val = my_model.posterior_sample(['Weight', 'Std'], n=20)
assert isinstance(val, dict)
assert len(val) == 2
assert 'Weight' in val
assert 'Bias' not in val
assert 'Std' in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 2 for v in val)
assert all(val[v].shape[0] == 20 for v in val)
assert all(val[v].shape[1] == 1 for v in val)
# posterior_ci should return confidence intervals of all params by default
val = my_model.posterior_ci(n=20)
assert isinstance(val, dict)
assert len(val) == 3
assert 'Weight' in val
assert 'Bias' in val
assert 'Std' in val
assert all(isinstance(val[v], tuple) for v in val)
assert all(isinstance(val[v][0], np.ndarray) for v in val)
assert all(isinstance(val[v][1], np.ndarray) for v in val)
assert all(val[v][0].ndim == 1 for v in val)
assert all(val[v][1].ndim == 1 for v in val)
assert all(val[v][0].shape[0] == 1 for v in val)
assert all(val[v][1].shape[0] == 1 for v in val)
# posterior_ci should return ci of only 1 if passed str
val = my_model.posterior_ci('Weight', n=20)
assert isinstance(val, tuple)
assert isinstance(val[0], np.ndarray)
assert isinstance(val[1], np.ndarray)
# posterior_ci should return specified cis if passed list of params
val = my_model.posterior_ci(['Weight', 'Std'], n=20)
assert isinstance(val, dict)
assert len(val) == 2
assert 'Weight' in val
assert 'Bias' not in val
assert 'Std' in val
assert all(isinstance(val[v], tuple) for v in val)
assert all(isinstance(val[v][0], np.ndarray) for v in val)
assert all(isinstance(val[v][1], np.ndarray) for v in val)
assert all(val[v][0].ndim == 1 for v in val)
assert all(val[v][1].ndim == 1 for v in val)
assert all(val[v][0].shape[0] == 1 for v in val)
assert all(val[v][1].shape[0] == 1 for v in val)
# prior_sample w/ no args should return all params
val = my_model.prior_sample(n=20)
assert isinstance(val, dict)
assert len(val) == 3
assert 'Weight' in val
assert 'Bias' in val
assert 'Std' in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 1 for v in val)
assert all(val[v].shape[0] == 20 for v in val)
# prior_sample w/ str should return sample of that param
val = my_model.prior_sample('Weight', n=20)
assert isinstance(val, np.ndarray)
assert val.ndim == 1
assert val.shape[0] == 20
# prior_sample w/ list of params should return only those params
val = my_model.prior_sample(['Weight', 'Std'], n=20)
assert isinstance(val, dict)
assert len(val) == 2
assert 'Weight' in val
assert 'Bias' not in val
assert 'Std' in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 1 for v in val)
assert all(val[v].shape[0] == 20 for v in val)
# log_prob should return log prob of each sample by default
probs = my_model.log_prob(x[:30], y[:30])
assert isinstance(probs, np.ndarray)
assert probs.ndim == 1
assert probs.shape[0] == 30
# log_prob should return sum if individually = False
s_prob = my_model.log_prob(x[:30], y[:30], individually=False)
assert isinstance(s_prob, np.floating)
assert s_prob == np.sum(probs)
# log_prob should return samples w/ distribution = True
probs = my_model.log_prob(x[:30], y[:30], n=10, distribution=True)
assert isinstance(probs, np.ndarray)
assert probs.ndim == 2
assert probs.shape[0] == 30
assert probs.shape[1] == 10
# log_prob should return per-sample totals w/ distribution = True, individually = False
probs = my_model.log_prob(x[:30], y[:30], n=10,
distribution=True, individually=False)
assert isinstance(probs, np.ndarray)
assert probs.ndim == 1
assert probs.shape[0] == 10
# prob should return prob of each sample by default
probs = my_model.prob(x[:30], y[:30])
assert isinstance(probs, np.ndarray)
assert probs.ndim == 1
assert probs.shape[0] == 30
assert np.all(probs >= 0)
# prob should return sum if individually = False
s_prob = my_model.prob(x[:30], y[:30], individually=False)
assert isinstance(s_prob, np.floating)
# prob should return samples w/ distribution = True
probs = my_model.prob(x[:30], y[:30], n=10, distribution=True)
assert isinstance(probs, np.ndarray)
assert probs.ndim == 2
assert probs.shape[0] == 30
assert probs.shape[1] == 10
assert np.all(probs >= 0)
# prob should return per-sample totals w/ distribution = True, individually = False
probs = my_model.prob(x[:30], y[:30], n=10,
distribution=True, individually=False)
assert isinstance(probs, np.ndarray)
assert probs.ndim == 1
assert probs.shape[0] == 10
assert np.all(probs >= 0)
def test_Model_DataGenerators():
"""Tests the probflow.models.Model sampling/predictive methods when
passed DataGenerators"""
class MyModel(Model):
def __init__(self):
self.weight = Parameter(name='Weight')
self.bias = Parameter(name='Bias')
self.std = ScaleParameter(name='Std')
def __call__(self, x):
return Normal(x*self.weight() + self.bias(), self.std())
# Instantiate the model
my_model = MyModel()
# Make a DataGenerator
x = np.random.randn(100).astype('float32')
y = -x + 1
data = DataGenerator(x, y, batch_size=5)
# Fit the model
my_model.fit(data, epochs=10)
# predictive samples
samples = my_model.predictive_sample(data, n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 50
assert samples.shape[1] == 100
# aleatoric samples
samples = my_model.aleatoric_sample(data, n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 50
assert samples.shape[1] == 100
# epistemic samples
samples = my_model.epistemic_sample(data, n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 50
assert samples.shape[1] == 100
# predict
samples = my_model.predict(data)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 1
assert samples.shape[0] == 100
# metric
metric = my_model.metric('mae', data)
assert isinstance(metric, np.floating)
metric = my_model.metric('mse', data)
assert isinstance(metric, np.floating)
assert metric >= 0
def test_Model_1D():
"""Tests the probflow.models.Model abstract base class"""
class MyModel(Model):
def __init__(self):
self.weight = Parameter([5, 1], name='Weight')
self.bias = Parameter([1, 1], name='Bias')
self.std = ScaleParameter([1, 1], name='Std')
def __call__(self, x):
return Normal(x@self.weight() + self.bias(), self.std())
# Instantiate the model
my_model = MyModel()
# Shouldn't be training
assert my_model._is_training is False
# Data
x = np.random.randn(100, 5).astype('float32')
w = np.random.randn(5, 1).astype('float32')
y = x@w + 1
# Fit the model
my_model.fit(x, y, batch_size=5, epochs=10)
# predictive samples
samples = my_model.predictive_sample(x[:30, :], n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 3
assert samples.shape[0] == 50
assert samples.shape[1] == 30
assert samples.shape[2] == 1
# aleatoric samples
samples = my_model.aleatoric_sample(x[:30, :], n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 3
assert samples.shape[0] == 50
assert samples.shape[1] == 30
assert samples.shape[2] == 1
# epistemic samples
samples = my_model.epistemic_sample(x[:30, :], n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 3
assert samples.shape[0] == 50
assert samples.shape[1] == 30
assert samples.shape[2] == 1
# predict
samples = my_model.predict(x[:30, :])
assert isinstance(samples, np.ndarray)
assert samples.ndim == 2
assert samples.shape[0] == 30
assert samples.shape[1] == 1
# metric
metric = my_model.metric('mse', x[:30, :], y[:30, :])
assert isinstance(metric, np.floating)
metric = my_model.metric('mae', x[:30, :], y[:30, :])
assert isinstance(metric, np.floating)
assert metric >= 0
# posterior_mean w/ no args should return all params
val = my_model.posterior_mean()
assert isinstance(val, dict)
assert len(val) == 3
assert 'Weight' in val
assert 'Bias' in val
assert 'Std' in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 2 for v in val)
assert val['Weight'].shape[0] == 5
assert val['Weight'].shape[1] == 1
assert val['Bias'].shape[0] == 1
assert val['Bias'].shape[1] == 1
assert val['Std'].shape[0] == 1
assert val['Std'].shape[1] == 1
# posterior_mean w/ str should return value of that param
val = my_model.posterior_mean('Weight')
assert isinstance(val, np.ndarray)
assert val.ndim == 2
assert val.shape[0] == 5
assert val.shape[1] == 1
# posterior_mean w/ list of params should return only those params
val = my_model.posterior_mean(['Weight', 'Std'])
assert isinstance(val, dict)
assert len(val) == 2
assert 'Weight' in val
assert 'Bias' not in val
assert 'Std' in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 2 for v in val)
assert val['Weight'].shape[0] == 5
assert val['Weight'].shape[1] == 1
assert val['Std'].shape[0] == 1
assert val['Std'].shape[1] == 1
# posterior_sample w/ no args should return all params
val = my_model.posterior_sample(n=20)
assert isinstance(val, dict)
assert len(val) == 3
assert 'Weight' in val
assert 'Bias' in val
assert 'Std' in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 3 for v in val)
assert val['Weight'].shape[0] == 20
assert val['Weight'].shape[1] == 5
assert val['Weight'].shape[2] == 1
assert val['Bias'].shape[0] == 20
assert val['Bias'].shape[1] == 1
assert val['Bias'].shape[2] == 1
assert val['Std'].shape[0] == 20
assert val['Std'].shape[1] == 1
assert val['Std'].shape[2] == 1
# posterior_sample w/ str should return sample of that param
val = my_model.posterior_sample('Weight', n=20)
assert isinstance(val, np.ndarray)
assert val.ndim == 3
assert val.shape[0] == 20
assert val.shape[1] == 5
assert val.shape[2] == 1
# posterior_sample w/ list of params should return only those params
val = my_model.posterior_sample(['Weight', 'Std'], n=20)
assert isinstance(val, dict)
assert len(val) == 2
assert 'Weight' in val
assert 'Bias' not in val
assert 'Std' in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 3 for v in val)
assert val['Weight'].shape[0] == 20
assert val['Weight'].shape[1] == 5
assert val['Weight'].shape[2] == 1
assert val['Std'].shape[0] == 20
assert val['Std'].shape[1] == 1
assert val['Std'].shape[2] == 1
# posterior_ci should return confidence intervals of all params by default
val = my_model.posterior_ci(n=20)
assert isinstance(val, dict)
assert len(val) == 3
assert 'Weight' in val
assert 'Bias' in val
assert 'Std' in val
assert all(isinstance(val[v], tuple) for v in val)
assert all(isinstance(val[v][0], np.ndarray) for v in val)
assert all(isinstance(val[v][1], np.ndarray) for v in val)
assert all(val[v][0].ndim == 2 for v in val)
assert all(val[v][1].ndim == 2 for v in val)
for i in range(2):  # check both the lower and upper CI bound
assert val['Weight'][i].shape[0] == 5
assert val['Weight'][i].shape[1] == 1
assert val['Bias'][i].shape[0] == 1
assert val['Bias'][i].shape[1] == 1
assert val['Std'][i].shape[0] == 1
assert val['Std'][i].shape[1] == 1
# posterior_ci should return ci of only 1 if passed str
val = my_model.posterior_ci('Weight', n=20)
assert isinstance(val, tuple)
assert isinstance(val[0], np.ndarray)
assert isinstance(val[1], np.ndarray)
# posterior_ci should return specified cis if passed list of params
val = my_model.posterior_ci(['Weight', 'Std'], n=20)
assert isinstance(val, dict)
assert len(val) == 2
assert 'Weight' in val
assert 'Bias' not in val
assert 'Std' in val
assert all(isinstance(val[v], tuple) for v in val)
assert all(isinstance(val[v][0], np.ndarray) for v in val)
assert all(isinstance(val[v][1], np.ndarray) for v in val)
assert all(val[v][0].ndim == 2 for v in val)
assert all(val[v][1].ndim == 2 for v in val)
for i in range(2):  # check both the lower and upper CI bound
assert val['Weight'][i].shape[0] == 5
assert val['Weight'][i].shape[1] == 1
assert val['Std'][i].shape[0] == 1
assert val['Std'][i].shape[1] == 1
# prior_sample w/ no args should return all params
val = my_model.prior_sample(n=20)
assert isinstance(val, dict)
assert len(val) == 3
assert 'Weight' in val
assert 'Bias' in val
assert 'Std' in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 1 for v in val)
assert all(val[v].shape[0] == 20 for v in val)
# prior_sample w/ str should return sample of that param
val = my_model.prior_sample('Weight', n=20)
assert isinstance(val, np.ndarray)
assert val.ndim == 1
assert val.shape[0] == 20
# prior_sample w/ list of params should return only those params
val = my_model.prior_sample(['Weight', 'Std'], n=20)
assert isinstance(val, dict)
assert len(val) == 2
assert 'Weight' in val
assert 'Bias' not in val
assert 'Std' in val
assert all(isinstance(val[v], np.ndarray) for v in val)
assert all(val[v].ndim == 1 for v in val)
assert all(val[v].shape[0] == 20 for v in val)
def test_Model_nesting():
"""Tests Model when it contains Modules and sub-modules"""
class MyModule(Module):
def __init__(self):
self.weight = Parameter([5, 1], name='Weight')
self.bias = Parameter([1, 1], name='Bias')
def __call__(self, x):
return x@self.weight() + self.bias()
class MyModel(Model):
def __init__(self):
self.module = MyModule()
self.std = ScaleParameter([1, 1], name='Std')
def __call__(self, x):
return Normal(self.module(x), self.std())
# Instantiate the model
my_model = MyModel()
# Shouldn't be training
assert my_model._is_training is False
# Data
x = np.random.randn(100, 5).astype('float32')
w = np.random.randn(5, 1).astype('float32')
y = x@w + 1
# Fit the model
my_model.fit(x, y, batch_size=5, epochs=10)
# predictive samples
samples = my_model.predictive_sample(x[:30, :], n=50)
assert isinstance(samples, np.ndarray)
assert samples.ndim == 3
assert samples.shape[0] == 50
assert samples.shape[1] == 30
assert samples.shape[2] == 1
# kl loss should be greater for outer model
assert my_model.kl_loss().numpy() > my_model.module.kl_loss().numpy()
def test_ContinuousModel():
"""Tests probflow.models.ContinuousModel"""
pass
#TODO
def test_DiscreteModel():
"""Tests probflow.models.DiscreteModel"""
pass
#TODO
def test_CategoricalModel():
"""Tests probflow.models.CategoricalModel"""
pass
#TODO
|
nilq/baby-python
|
python
|
from datetime import timedelta
from fastapi import APIRouter, Depends, HTTPException
from fastapi.security import OAuth2PasswordRequestForm
from sqlalchemy.orm import Session
from starlette import status
import app.core.db.crud as crud
router = APIRouter()
@router.post("users/token", tags=["auth"])
async def login_for_access_token(form_data: OAuth2PasswordRequestForm = Depends(), db: Session = Depends(crud.get_db)):
user = crud.authenticate_user(db, form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Bad credentials",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=crud.ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = crud.create_access_token(
data={"sub": user.username}, expires_delta=access_token_expires
)
return {"access_token": access_token, "token_type": "bearer"}
|
nilq/baby-python
|
python
|
import time
from gpioServo import MotorControls
motor = MotorControls()
import numpy as np
key_strokes = np.load('train_data.npy',encoding='latin1')
#with open("key_strokes.txt",'r') as keys:
# key_strokes = keys.readlines()
#key_strokes = [x.strip() for x in key_strokes]
key_strokes = [x['input'] for x in key_strokes]
print([x for x in key_strokes])
for x in key_strokes:
    if x == [1,0,0,0]:
        print('moving forward')
        motor.forward()
    elif x == [0,0,0,1]:
        print('stopping')
        motor.stop()
    elif x == [0,1,0,0]:
        print('turning left')
        motor.turn1()
    elif x == [0,0,1,0]:  # assumed one-hot pattern for a right turn
        print('turning right')
        # MotorControls exposes no right-turn call in this file, so only log
    time.sleep(0.05)
motor.end()
|
nilq/baby-python
|
python
|
import tempfile
import os
import geohash.lock
def test_lock_thread():
lck = geohash.lock.ThreadSynchronizer()
assert not lck.lock.locked()
with lck:
assert lck.lock.locked()
assert not lck.lock.locked()
def test_lock_process() -> None:
path = tempfile.NamedTemporaryFile().name
assert not os.path.exists(path)
lck = geohash.lock.ProcessSynchronizer(path)
assert not os.path.exists(path)
assert not lck.lock.locked()
with lck:
assert lck.lock.locked()
lck2 = geohash.lock.ProcessSynchronizer(path, timeout=0.5)
try:
with lck2:
assert False
except geohash.lock.LockError:
pass
assert os.path.exists(path)
assert not os.path.exists(path)
|
nilq/baby-python
|
python
|
A, B, W = map(int, input().split())
W *= 1000
mx = 0
mn = 1000*1000
for i in range(1, 1000*1000+1):
if A*i <= W <= B*i:
mn = min(mn, i)
mx = max(mx, i)
if mx == 0:
print('UNSATISFIABLE')
else:
print(mn, mx)
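# A closed-form sketch of the same feasibility condition A*i <= W <= B*i
# (assuming A >= 1): the smallest feasible count is ceil(W / B) and the
# largest is floor(W / A); the answer is UNSATISFIABLE when
# ceil(W / B) > floor(W / A).
#
#   mn = -(-W // B)  # ceil division
#   mx = W // A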
|
nilq/baby-python
|
python
|
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorKNN()
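# The KNN subtractor also accepts tuning parameters if the defaults prove
# noisy, e.g. cv2.createBackgroundSubtractorKNN(history=500,
# dist2Threshold=400.0, detectShadows=True).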
while(True):
ret, frame = cap.read()
fgmask = fgbg.apply(frame)
cv2.imshow('frame', fgmask)
if cv2.waitKey(30) == 27:  # exit on ESC
break
cap.release()
cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
from requests import Session, request
from urllib.parse import urljoin
class FlaskSession(Session):
def __init__(self, app=None, config_prefix="MICROSERVICE"):
super(FlaskSession, self).__init__()
self.config_prefix = config_prefix
self.service_url = None
self.service_port = None
if app is not None:
self.init_app(app)
def init_app(self, app):
self.service_url = app.config.get("{}_URL".format(self.config_prefix))
self.service_port = app.config.get(
"{}_PORT".format(self.config_prefix), None)
def request(self, method, url, *args, **kwargs):
if self.service_port:
url = urljoin("{}:{}".format(self.service_url,
self.service_port),
url)
else:
url = urljoin(self.service_url, url)
return super(FlaskSession, self).request(method, url, *args, **kwargs)
class FlaskRequest:
def __init__(self, app=None, config_prefix="MICROSERVICE"):
self.config_prefix = config_prefix
self.service_url = None
self.service_port = None
if app is not None:
self.init_app(app)
def init_app(self, app):
self.service_url = app.config.get("{}_URL".format(self.config_prefix))
self.service_port = app.config.get(
"{}_PORT".format(self.config_prefix), None)
def request(self, method, url, *args, **kwargs):
if self.service_port:
url = urljoin("{}:{}".format(self.service_url,
self.service_port),
url)
else:
url = urljoin(self.service_url, url)
return request(method, url, *args, **kwargs)
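# A minimal usage sketch (the service URL and endpoint are hypothetical;
# config keys follow the MICROSERVICE prefix used above):
#
#   from flask import Flask
#
#   app = Flask(__name__)
#   app.config['MICROSERVICE_URL'] = 'http://users-service'
#   app.config['MICROSERVICE_PORT'] = 8080
#
#   session = FlaskSession(app)
#   resp = session.request('GET', '/health')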
|
nilq/baby-python
|
python
|
from GetFile import getContents
from SortedSearch import find_le_index
from blist import blist
# ------Classes------ #
class Rectangle:
@classmethod
def from_values(cls, rectID, x, y, width, height):
    # Alternate constructor; a second plain __init__ would silently
    # override the string-parsing one below.
    rect = cls.__new__(cls)
    rect.rectID = rectID
    rect.x = x
    rect.y = y
    rect.width = width
    rect.height = height
    return rect
# Assuming string of format 'rectID @ x,y: widthxheight'
def __init__(self, string):
parts = string.split(' ')
self.rectID = parts[0]
xyParts = parts[2][:(len(parts[2]) - 1)].split(',')
self.x = int(xyParts[0])
self.y = int(xyParts[1])
wlParts = parts[3].split('x')
self.width = int(wlParts[0])
self.height = int(wlParts[1])
class xLine:
def __init__(self, x, rectID, isLeftEdge):
self.x = x
self.rectID = rectID
self.isLeftEdge = isLeftEdge
class sweepLinePoint:
def __init__(self, y, claims, pointsHere, lowerEdges=None):
    self.y = y
    self.claims = claims
    self.pointsHere = pointsHere
    self.lowerEdges = set()
    # copy so instances never share the caller's set or a mutable default
    self.rectIDs = set(lowerEdges) if lowerEdges else set()
def addClaim(self):
self.claims += 1
def removeClaim(self):
self.claims -= 1
def addPoint(self):
self.pointsHere += 1
def removePoint(self):
self.pointsHere -= 1
def addLowerEdge(self, rectID):
self.lowerEdges.add(rectID)
def removeLowerEdge(self, rectID):
self.lowerEdges.discard(rectID)
def getLenLowerEdges(self):
return len(self.lowerEdges)
def addRectID(self, rectID):
self.rectIDs.add(rectID)
def removeRectID(self, rectID):
self.rectIDs.discard(rectID)
def getLenRectIDs(self):
return len(self.rectIDs)
class sweepLine:
def __init__(self):
self.sweepLineList = blist()
self.yCoords = blist()
self.heightCovered = 0
def addOrRemoveRange(self, yRange, shouldAdd, rectID):
startIndex = self.addOrRemovePoint(yRange[0], shouldAdd, False, rectID)
endIndex = self.addOrRemovePoint(yRange[1], shouldAdd, True, rectID)
prevNonEndingClaims = -1
prevYCoord = -1
entriesToDelete = []
for j, slp in enumerate(self.sweepLineList[startIndex:endIndex+1]):
if shouldAdd:
slp.addClaim()
slp.addRectID(rectID)
adjustedOverlaps = slp.rectIDs.difference(slp.lowerEdges)
if len(adjustedOverlaps) > 1:
for rid in adjustedOverlaps:
overlapsDictionary.pop(rid, None)
else:
slp.removeClaim()
slp.removeRectID(rectID)
if slp.pointsHere == 0:
entriesToDelete = [startIndex + j] + entriesToDelete
if shouldAdd and ((prevNonEndingClaims == 2 and slp.claims >= 2) or (prevNonEndingClaims >= 2 and slp.claims == 2)):
self.heightCovered += slp.y - prevYCoord
if not shouldAdd and ((prevNonEndingClaims == 1 and slp.claims >= 1) or (prevNonEndingClaims >= 1 and slp.claims == 1)):
self.heightCovered -= slp.y - prevYCoord
prevNonEndingClaims = slp.claims - slp.getLenLowerEdges()
prevYCoord = slp.y
for toDelete in entriesToDelete:
del self.sweepLineList[toDelete]
del self.yCoords[toDelete]
def addOrRemovePoint(self, point, shouldAdd, isLowerEdge, rectID):
if not self.yCoords:
if not shouldAdd:
raise ValueError
else:
self.yCoords.append(point)
newSLP = sweepLinePoint(point, 0, 1)
self.sweepLineList.append(newSLP)
return 0
else:
try:
indexBefore = find_le_index(self.yCoords, point)
except ValueError:
indexBefore = -1
if indexBefore != -1 and self.yCoords[indexBefore] == point:
if shouldAdd:
self.sweepLineList[indexBefore].addPoint()
if isLowerEdge:
self.sweepLineList[indexBefore].addLowerEdge(rectID)
else:
self.sweepLineList[indexBefore].removePoint()
if isLowerEdge:
self.sweepLineList[indexBefore].removeLowerEdge(rectID)
return indexBefore
else:
insertAt = indexBefore + 1
self.yCoords.insert(insertAt, point)
if insertAt == 0 or insertAt == len(self.yCoords) - 1:
claims = 0
lowerEdges = set()
else:
claims = self.sweepLineList[indexBefore].claims - self.sweepLineList[indexBefore].getLenLowerEdges()
lowerEdges = self.sweepLineList[indexBefore].rectIDs.difference(self.sweepLineList[indexBefore].lowerEdges)
newSLP = sweepLinePoint(point, claims, 1, lowerEdges)
if isLowerEdge:
newSLP.addLowerEdge(rectID)
self.sweepLineList.insert(insertAt, newSLP)
return insertAt
# ------Input------ #
answer1 = 0
answer2 = 0
contents = getContents(3, True)
rectangleStrings = contents.split("\n")
rectangles = list(map(Rectangle, rectangleStrings))
# ------Part 1 & 2------ #
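# Sweep-line overview: xLines holds every left/right rectangle edge sorted by
# x. As the sweep advances, sweepLine tracks the total y-length covered by two
# or more claims (heightCovered); answer 1 accumulates that height times the
# x-distance swept. overlapsDictionary starts with every rectID and drops any
# claim that ever overlaps another, so answer 2 is the claim left standing.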
rectangleDictionary = {rectObj.rectID: rectObj for rectObj in rectangles}
overlapsDictionary = {rectObj.rectID: 0 for rectObj in rectangles}
xLines = list(map(lambda rectObj: xLine(rectObj.x, rectObj.rectID, True), rectangles)) \
+ list(map(lambda rectObj: xLine(rectObj.x + rectObj.width, rectObj.rectID, False), rectangles))
xLines = sorted(xLines, key=lambda xl: (xl.x, xl.isLeftEdge, rectangleDictionary[xl.rectID].y))
sl = sweepLine()
previousX = 0
intersectionArea = 0
for xl in xLines:
intersectionArea += sl.heightCovered * (xl.x - previousX)
currRectangle = rectangleDictionary[xl.rectID]
yRange = (currRectangle.y, currRectangle.y + currRectangle.height)
sl.addOrRemoveRange(yRange, xl.isLeftEdge, xl.rectID)
previousX = xl.x
answer1 = intersectionArea
answer2 = list(overlapsDictionary)[0]
# ------Output------ #
print("Answer 1: " + str(answer1))
print("Answer 2: " + str(answer2))
|
nilq/baby-python
|
python
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DataTableResponseColumn(Model):
"""Column definition.
:param column_name: Name of the column
:type column_name: str
:param data_type: Data type which looks like 'String' or 'Int32'.
:type data_type: str
:param column_type: Column Type
:type column_type: str
"""
_attribute_map = {
'column_name': {'key': 'columnName', 'type': 'str'},
'data_type': {'key': 'dataType', 'type': 'str'},
'column_type': {'key': 'columnType', 'type': 'str'},
}
def __init__(self, **kwargs):
super(DataTableResponseColumn, self).__init__(**kwargs)
self.column_name = kwargs.get('column_name', None)
self.data_type = kwargs.get('data_type', None)
self.column_type = kwargs.get('column_type', None)
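# Instantiation follows the msrest kwargs pattern, e.g. (illustrative values):
#
#   column = DataTableResponseColumn(
#       column_name='requestCount', data_type='Int32', column_type='Dimension')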
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from os import environ
import os.path
import re
from .compat import (
string_type,
json,
urlparse,
urljoin
)
from .exceptions import (
InvalidResourcePath,
)
EVENTBRITE_API_URL = environ.get(
'EVENTBRITE_API_URL', 'https://www.eventbriteapi.com/v3/')
EVENTBRITE_API_PATH = urlparse(EVENTBRITE_API_URL).path
URL_MAP_FILE = os.path.join(
os.path.dirname(__file__), "apiv3_url_mapping.json")
def get_mapping(_compiled_mapping=[]):
    # The mutable default argument is used as a cross-call cache; it must be
    # mutated in place (extend), not rebound, or the cache never fills.
    if _compiled_mapping:
        return _compiled_mapping
    try:
        mapping = json.load(open(URL_MAP_FILE))
        for endpoint in mapping:
            endpoint["url_regexp"] = re.compile(endpoint["url_regexp"])
        _compiled_mapping.extend(mapping)
        return _compiled_mapping
    except Exception:
        raise  # TODO: do we handle it here?
def format_path(path, eventbrite_api_url=EVENTBRITE_API_URL):
error_msg = "The path argument must be a string that begins with '/'"
if not isinstance(path, string_type):
raise InvalidResourcePath(error_msg)
# Probably a webhook path
if path.startswith(eventbrite_api_url):
return path
# Using the HTTP shortcut
if path.startswith("/"):
return urljoin(eventbrite_api_url, path.lstrip('/'))
raise InvalidResourcePath(error_msg)
def construct_namespaced_dict(namespace, unfiltered_dict):
result_dict = {namespace: {}}
for key, value in unfiltered_dict.items():
if key.startswith(namespace):
result_dict[namespace][key[len(namespace) + 1:]] = value
return result_dict
def get_webhook_from_request(request):
if hasattr(request, "get_json"):
return request.get_json()
return request
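# A quick sketch of format_path behaviour (paths are illustrative):
#
#   format_path('/events/123/')   -> 'https://www.eventbriteapi.com/v3/events/123/'
#   format_path(EVENTBRITE_API_URL + 'users/me/') is returned unchanged
#   format_path('events/123/')    raises InvalidResourcePath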
|
nilq/baby-python
|
python
|
import wx
import prefs
from theme import Theme
from wx.lib.expando import ExpandoTextCtrl, EVT_ETC_LAYOUT_NEEDED
class PrefsEditor(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, size=(500,500), title="WxPyMOO Preferences")
self.parent = parent
panel = wx.Panel(self)
self.book = wx.Notebook(self)
self.general_page = self.createGeneralPanel()
self.fonts_page = self.createFontPanel ()
self.paths_page = self.createPathsPanel ()
self.book.AddPage(self.general_page, "General")
self.book.AddPage(self.fonts_page, "Fonts and Colors")
self.book.AddPage(self.paths_page, "Paths and Dirs")
sizer = wx.BoxSizer(wx.VERTICAL)
button_sizer = self.CreateButtonSizer( wx.OK | wx.CANCEL )
sizer.Add(self.book, 1, wx.EXPAND | wx.ALL, 10)
sizer.Add(button_sizer, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.BOTTOM, 5)
self.SetSizer(sizer)
self.Centre(wx.BOTH)
self.update_sample_text(None)
self.Bind(wx.EVT_BUTTON, self.update_prefs, id = wx.ID_OK)
def update_prefs(self, evt):
prefs.update(self)
evt.Skip()
def createGeneralPanel(self):
gp = wx.Panel(self.book)
gp.save_size_checkbox = wx.CheckBox(gp, -1, 'Save Window Size')
gp.save_size_checkbox.SetValue( prefs.get('save_window_size') )
gp.autoconnect_checkbox = wx.CheckBox(gp, -1, 'Autoconnect to last world at startup')
gp.autoconnect_checkbox.SetValue( prefs.get('autoconnect_last_world') )
gp.xmouse_checkbox = wx.CheckBox(gp, -1, 'Use X-style mouse copy/paste behavior')
gp.xmouse_checkbox.SetValue( prefs.get('use_x_copy_paste') )
gp.local_echo_checkbox = wx.CheckBox(gp, -1, 'Echo Typed Commands')
gp.local_echo_checkbox.SetValue( prefs.get('local_echo') )
gp.scroll_on_output_checkbox = wx.CheckBox(gp, -1, 'Scroll to bottom when new text arrives')
gp.scroll_on_output_checkbox.SetValue( prefs.get('scroll_on_output') )
gp.panel_sizer = wx.BoxSizer(wx.VERTICAL)
gp.panel_sizer.Add(gp.save_size_checkbox, flag = wx.ALL, border = 10)
gp.panel_sizer.Add(gp.autoconnect_checkbox, flag = wx.ALL, border = 10)
gp.panel_sizer.Add(gp.xmouse_checkbox, flag = wx.ALL, border = 10)
gp.panel_sizer.Add(gp.local_echo_checkbox, flag = wx.ALL, border = 10)
gp.panel_sizer.Add(gp.scroll_on_output_checkbox, flag = wx.ALL, border = 10)
gp.SetSizer(gp.panel_sizer)
return gp
def createFontPanel(self):
fcp = wx.Panel(self.book)
font = wx.Font(prefs.get('font'))
fgcolour = prefs.get('fgcolour')
bgcolour = prefs.get('bgcolour')
# output sample/controls
fcp.sample = ExpandoTextCtrl(fcp, style = wx.TE_READONLY | wx.TE_RICH | wx.TE_MULTILINE , size = wx.Size(400,-1))
fcp.font_ctrl = wx.FontPickerCtrl (fcp, style = wx.FNTP_FONTDESC_AS_LABEL | wx.FNTP_USEFONT_FOR_LABEL, font = font)
fcp.theme_picker = wx.Choice(fcp, choices = Theme.all_theme_names())
fcp.ansi_checkbox = wx.CheckBox(fcp, -1, 'Use ANSI colors')
fcp.ansi_blink_checkbox = wx.CheckBox(fcp, -1, 'Honor ANSI blink')
# TODO - get and set these two at display time not create time
fcp.theme = prefs.get('theme')
fcp.theme_picker.SetSelection(fcp.theme_picker.FindString(fcp.theme))
if prefs.get('use_ansi'):
fcp.ansi_checkbox.SetValue(True)
fcp.theme_picker.Enable()
else:
fcp.ansi_checkbox.SetValue(False)
fcp.theme_picker.Disable()
ansi_sizer = wx.BoxSizer(wx.HORIZONTAL)
ansi_sizer.Add(fcp.ansi_checkbox, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER)
ansi_sizer.Add(fcp.ansi_blink_checkbox, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER)
ansi_sizer.Add(fcp.theme_picker, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER)
panel_sizer = wx.BoxSizer(wx.VERTICAL)
panel_sizer.Add(fcp.sample, 0, wx.RIGHT|wx.LEFT|wx.EXPAND|wx.TOP, 10)
panel_sizer.AddSpacer(10)
panel_sizer.Add(fcp.font_ctrl , 0, wx.EXPAND, 0)
panel_sizer.AddSpacer(10)
panel_sizer.Add(ansi_sizer, 0, wx.RIGHT|wx.LEFT|wx.EXPAND, 10)
self.Bind(wx.EVT_FONTPICKER_CHANGED , self.update_sample_text, fcp.font_ctrl)
self.Bind(wx.EVT_CHOICE , self.update_sample_text, fcp.theme_picker)
self.Bind(wx.EVT_CHECKBOX , self.update_sample_text, fcp.ansi_checkbox)
self.Bind(EVT_ETC_LAYOUT_NEEDED , self.resize_everything, fcp.sample)
fcp.SetSizer(panel_sizer)
fcp.Layout()
return fcp
def createPathsPanel(self):
pp = wx.Panel(self.book)
editor_label = wx.StaticText(pp, -1, "External Editor")
pp.external_editor = wx.TextCtrl(pp, -1, "")
pp.external_editor.SetValue( prefs.get('external_editor') )
editor_sizer = wx.FlexGridSizer(1,2,5,10)
editor_sizer.Add(editor_label, 0, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 0)
editor_sizer.Add(pp.external_editor, 1, wx.EXPAND, 0)
editor_sizer.AddGrowableCol(1)
pp.panel_sizer = wx.BoxSizer(wx.VERTICAL)
pp.panel_sizer.Add(editor_sizer, 0, wx.EXPAND | wx.ALL, 10)
pp.SetSizer(pp.panel_sizer)
return pp
def resize_everything(self, evt):
self.Fit()
def update_sample_text(self, evt):
fp = self.fonts_page
theme = Theme.fetch(fp.theme_picker.GetStringSelection())
fgcolour = theme.get('foreground')
bgcolour = theme.get('background')
font = fp.font_ctrl.GetSelectedFont()
textattr = wx.TextAttr(fgcolour, bgcolour, font)
fp.sample.SetBackgroundColour(bgcolour)
fp.sample.SetValue("""
Emerson says, "This is what your window will look like."
Emerson waves around a brightly-colored banner.
It's super effective!
01234567 89ABCDEF
""")
fp.sample.SetStyle(0, fp.sample.GetLastPosition(), textattr)
# Mock up ANSI if ANSI pref is on
# TODO - maybe actually just shove ANSI-code-ful stuff through the actual output_panel ANSIfier?
if fp.ansi_checkbox.GetValue():
textattr.SetTextColour(theme.Colour('blue'))
fp.sample.SetStyle(1, 8, textattr)
fp.sample.SetStyle(58, 66,textattr)
textattr.SetTextColour(theme.Colour('red'))
fp.sample.SetStyle(81, 89, textattr)
textattr.SetTextColour(theme.Colour('yellow'))
fp.sample.SetStyle(90, 97, textattr)
textattr.SetTextColour(theme.Colour('green'))
fp.sample.SetStyle(98, 104, textattr)
fp.theme_picker.Enable()
textattr.SetTextColour(theme.Colour('white'))
textattr.SetFontWeight(wx.FONTWEIGHT_BOLD)
fp.sample.SetStyle(107, 128, textattr)
textattr.SetTextColour(theme.Colour('red', 'bright'))
fp.sample.SetStyle(112, 117, textattr)
textattr.SetFontWeight(wx.FONTWEIGHT_NORMAL)
textattr.SetTextColour(theme.Colour('black'))
fp.sample.SetStyle(130, 131, textattr)
textattr.SetTextColour(theme.Colour('red'))
fp.sample.SetStyle(131, 132, textattr)
textattr.SetTextColour(theme.Colour('green'))
fp.sample.SetStyle(132, 133, textattr)
textattr.SetTextColour(theme.Colour('yellow'))
fp.sample.SetStyle(133, 134, textattr)
textattr.SetTextColour(theme.Colour('blue'))
fp.sample.SetStyle(134, 135, textattr)
textattr.SetTextColour(theme.Colour('magenta'))
fp.sample.SetStyle(135, 136, textattr)
textattr.SetTextColour(theme.Colour('cyan'))
fp.sample.SetStyle(136, 137, textattr)
textattr.SetTextColour(theme.Colour('white'))
fp.sample.SetStyle(137, 138, textattr)
textattr.SetTextColour(fgcolour)
textattr.SetBackgroundColour(theme.Colour('black'))
fp.sample.SetStyle(139, 140, textattr)
textattr.SetBackgroundColour(theme.Colour('red'))
fp.sample.SetStyle(140, 141, textattr)
textattr.SetBackgroundColour(theme.Colour('green'))
fp.sample.SetStyle(141, 142, textattr)
textattr.SetBackgroundColour(theme.Colour('yellow'))
fp.sample.SetStyle(142, 143, textattr)
textattr.SetBackgroundColour(theme.Colour('blue'))
fp.sample.SetStyle(143, 144, textattr)
textattr.SetBackgroundColour(theme.Colour('magenta'))
fp.sample.SetStyle(144, 145, textattr)
textattr.SetBackgroundColour(theme.Colour('cyan'))
fp.sample.SetStyle(145, 146, textattr)
textattr.SetBackgroundColour(theme.Colour('white'))
fp.sample.SetStyle(146, 147, textattr)
else:
fp.theme_picker.Disable()
if evt: evt.Skip()
|
nilq/baby-python
|
python
|
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test densityTools."""
# pylint: disable=missing-function-docstring,missing-class-docstring,abstract-method,protected-access,no-member,disallowed-name,invalid-name
import unittest
from armi.utils import densityTools
from armi.nucDirectory import elements, nuclideBases
from armi.materials.uraniumOxide import UO2
class Test_densityTools(unittest.TestCase):
def test_expandElementalMassFracsToNuclides(self):
element = elements.bySymbol["N"]
mass = {"N": 1.0}
densityTools.expandElementalMassFracsToNuclides(mass, [(element, None)])
self.assertNotIn("N", mass)
self.assertIn("N15", mass)
self.assertIn("N14", mass)
self.assertAlmostEqual(sum(mass.values()), 1.0)
self.assertNotIn("N13", mass) # nothing unnatural.
def test_expandElementalZeroMassFrac(self):
"""As above, but try with a zero mass frac elemental."""
elementals = [(elements.bySymbol["N"], None), (elements.bySymbol["O"], None)]
mass = {"N": 0.0, "O": 1.0}
densityTools.expandElementalMassFracsToNuclides(mass, elementals)
self.assertNotIn("N", mass)
self.assertNotIn("O", mass)
        # The current expectation is that elements with zero mass fraction get
        # expanded and that isotopes with zero mass remain in the dictionary.
self.assertIn("N14", mass)
self.assertAlmostEqual(sum(mass.values()), 1.0)
def test_getChemicals(self):
u235 = nuclideBases.byName["U235"]
u238 = nuclideBases.byName["U238"]
o16 = nuclideBases.byName["O16"]
uo2 = UO2()
uo2Chemicals = densityTools.getChemicals(uo2.p.massFrac)
for symbol in ["U", "O"]:
self.assertIn(symbol, uo2Chemicals.keys())
self.assertAlmostEqual(
uo2Chemicals["U"], uo2.p.massFrac["U235"] + uo2.p.massFrac["U238"], 6
)
self.assertAlmostEqual(uo2Chemicals["O"], uo2.p.massFrac["O"], 6)
# ensure getChemicals works if the nuclideBase is the dict key
massFrac = {u238: 0.87, u235: 0.12, o16: 0.01}
uo2Chemicals = densityTools.getChemicals(massFrac)
for symbol in ["U", "O"]:
self.assertIn(symbol, uo2Chemicals.keys())
self.assertAlmostEqual(uo2Chemicals["U"], massFrac[u235] + massFrac[u238], 2)
self.assertAlmostEqual(uo2Chemicals["O"], massFrac[o16], 2)
def test_expandElement(self):
"""Ensure isotopic subset feature works in expansion."""
elemental = elements.bySymbol["O"]
massFrac = 1.0
subset = [nuclideBases.byName["O16"], nuclideBases.byName["O17"]]
m1 = densityTools.expandElementalNuclideMassFracs(elemental, massFrac)
m2 = densityTools.expandElementalNuclideMassFracs(elemental, massFrac, subset)
self.assertIn("O18", m1)
self.assertNotIn("O18", m2)
self.assertAlmostEqual(1.0, sum(m1.values()))
self.assertAlmostEqual(1.0, sum(m2.values()))
# expect some small difference due to renormalization
self.assertNotAlmostEqual(m1["O17"], m2["O17"])
self.assertAlmostEqual(m1["O17"], m2["O17"], delta=1e-5)
def test_applyIsotopicsMix(self):
"""Ensure isotopc classes get mixed properly."""
uo2 = UO2()
massFracO = uo2.p.massFrac["O"]
uo2.p.class1_wt_frac = 0.2
enrichedMassFracs = {"U235": 0.3, "U234": 0.1, "PU239": 0.6}
fertileMassFracs = {"U238": 0.3, "PU240": 0.7}
densityTools.applyIsotopicsMix(uo2, enrichedMassFracs, fertileMassFracs)
self.assertAlmostEqual(
uo2.p.massFrac["U234"], (1 - massFracO) * 0.2 * 0.1
) # HM blended
self.assertAlmostEqual(
uo2.p.massFrac["U238"], (1 - massFracO) * 0.8 * 0.3
) # HM blended
self.assertAlmostEqual(uo2.p.massFrac["O"], massFracO) # non-HM stays unchanged
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
import pytest
from morphometrics.explore._tests._explore_test_utils import make_test_features_anndata
from morphometrics.explore.dimensionality_reduction import pca, umap
@pytest.mark.parametrize("normalize_data", [True, False])
def test_pca_no_gpu(normalize_data: bool):
"""This test doesn't check correctness of the PCA in, just that it
runs and adds the correct fields
"""
adata = make_test_features_anndata()
pca(adata, normalize_data=normalize_data)
assert "X_pca" in adata.obsm_keys()
@pytest.mark.parametrize("normalize_data", [True, False])
def test_umap_no_gpu(normalize_data):
adata = make_test_features_anndata()
umap(adata, normalize_data=normalize_data, compute_neighbors=True)
assert "X_umap" in adata.obsm_keys()
|
nilq/baby-python
|
python
|
#
# Created by Lukas Lüftinger on 05/02/2019.
#
from .clf.svm import TrexSVM
from .clf.xgbm import TrexXGB
from .shap_handler import ShapHandler
__all__ = ['TrexXGB', 'TrexSVM', 'ShapHandler']
|
nilq/baby-python
|
python
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
`cli.py`
Command-Line interface of `slo-generator`.
"""
import argparse
import yaml
import logging
import sys
from slo_generator.compute import compute
import slo_generator.utils as utils
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
logging.getLogger('googleapiclient').setLevel(logging.ERROR)
LOGGER = logging.getLogger(__name__)
def main():
args = parse_args(sys.argv[1:])
slo_config_path = utils.normalize(args.slo_config)
error_budget_path = utils.normalize(args.error_budget_policy)
export = args.export
LOGGER.info("Loading SLO config from %s" % slo_config_path)
LOGGER.info("Loading Error Budget config from %s" % error_budget_path)
with open(slo_config_path, 'r') as f:
slo_config = yaml.safe_load(f)
with open(error_budget_path, 'r') as f:
error_budget_policy = yaml.safe_load(f)
compute(slo_config, error_budget_policy, do_export=export)
def parse_args(args):
"""Parse CLI arguments.
Args:
args (list): List of args passed from CLI.
Returns:
obj: Args parsed by ArgumentParser.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--slo-config',
type=str,
required=False,
default='slo.json',
help='JSON configuration file')
parser.add_argument('--error-budget-policy',
type=str,
required=False,
default='error_budget_policy.json',
help='JSON configuration file')
    # type=bool would treat any non-empty string (even "False") as True,
    # so expose --export as a plain flag instead.
    parser.add_argument('--export', action='store_true', required=False)
return parser.parse_args(args)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class CompartmentDefinition_ResourceSchema:
"""
A compartment definition that defines how resources are accessed on a server.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
A compartment definition that defines how resources are accessed on a server.
id: unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
code: The name of a resource supported by the server.
param: The name of a search parameter that represents the link to the compartment.
More than one may be listed because a resource may be linked to a compartment
            in more than one way.
documentation: Additional documentation about the resource and compartment.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
if (
max_recursion_limit
and nesting_list.count("CompartmentDefinition_Resource")
>= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["CompartmentDefinition_Resource"]
schema = StructType(
[
# unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The name of a resource supported by the server.
StructField("code", StringType(), True),
# The name of a search parameter that represents the link to the compartment.
# More than one may be listed because a resource may be linked to a compartment
                # in more than one way.
StructField("param", ArrayType(StringType()), True),
# Additional documentation about the resource and compartment.
StructField("documentation", StringType(), True),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
|
nilq/baby-python
|
python
|
def square_exp(x:int, power:int) -> int:
if (power < 0):
raise ValueError("exp: power < 0 is unsupported")
result = 1
bit_list = []
while (power != 0):
bit_list.insert(0, power % 2)
power //= 2
for i in bit_list:
result = result * result
if (i == 1):
result = result * x
return result
exp = square_exp
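# Illustrative trace (added comment, not part of the original module): the loop
# implements exponentiation by squaring, consuming the exponent's bits from
# most significant to least. For square_exp(x, 13), power 13 = 0b1101:
#   bit 1 -> result = 1*1 * x          = x
#   bit 1 -> result = x*x * x          = x**3
#   bit 0 -> result = (x**3)**2        = x**6
#   bit 1 -> result = (x**6)**2 * x    = x**13
# e.g. square_exp(3, 13) == 3 ** 13 and square_exp(2, 0) == 1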
|
nilq/baby-python
|
python
|
"""This module defines the header class."""
import abc
import datetime
from typing import Optional, Dict
from ..misc.errorvalue import ErrorValue
class Header(object, metaclass=abc.ABCMeta):
"""A generic header class for SAXS experiments, with the bare minimum attributes to facilitate data processing and
reduction.
"""
def __init__(self, headerdict: Optional[Dict] = None):
if headerdict is None:
self._data = {}
else:
self._data = headerdict
@property
@abc.abstractmethod
def title(self) -> str:
"""Sample name"""
@property
@abc.abstractmethod
def fsn(self) -> int:
"""File sequence number """
@property
@abc.abstractmethod
def energy(self) -> ErrorValue:
"""X-ray energy"""
@property
@abc.abstractmethod
def wavelength(self) -> ErrorValue:
"""X-ray wavelength"""
@property
@abc.abstractmethod
def distance(self) -> ErrorValue:
"""Sample-to-detector distance"""
@property
@abc.abstractmethod
def temperature(self) -> Optional[ErrorValue]:
"""Sample temperature"""
@property
@abc.abstractmethod
def beamcenterx(self) -> ErrorValue:
"""X (column) coordinate of the beam center, pixel units, 0-based."""
@property
@abc.abstractmethod
def beamcentery(self) -> ErrorValue:
"""Y (row) coordinate of the beam center, pixel units, 0-based."""
@property
@abc.abstractmethod
def pixelsizex(self) -> ErrorValue:
"""X (column) size of a pixel, in mm units"""
@property
@abc.abstractmethod
def pixelsizey(self) -> ErrorValue:
"""Y (row) size of a pixel, in mm units"""
@property
@abc.abstractmethod
def exposuretime(self) -> ErrorValue:
"""Exposure time in seconds"""
@property
@abc.abstractmethod
def date(self) -> datetime.datetime:
"""Date of the experiment (start of exposure)"""
@property
@abc.abstractmethod
def startdate(self) -> datetime.datetime:
"""Date of the experiment (start of exposure)"""
@property
@abc.abstractmethod
def enddate(self) -> datetime.datetime:
"""Date of the experiment (end of exposure)"""
@property
@abc.abstractmethod
def maskname(self) -> Optional[str]:
"""Name of the mask matrix file."""
@abc.abstractmethod
def new_from_file(self, filename):
"""Load a header from a file."""
@property
@abc.abstractmethod
def transmission(self) -> ErrorValue:
"""Sample transmission."""
@property
@abc.abstractmethod
def vacuum(self) -> ErrorValue:
"""Vacuum pressure around the sample"""
@property
@abc.abstractmethod
def flux(self) -> ErrorValue:
"""X-ray flux in photons/sec."""
@property
@abc.abstractmethod
def thickness(self) -> ErrorValue:
"""Sample thickness in cm"""
@property
@abc.abstractmethod
def distancedecrease(self) -> ErrorValue:
"""Distance by which the sample is nearer to the detector than the
distance calibration sample"""
@property
@abc.abstractmethod
def samplex(self) -> ErrorValue:
"""Horizontal sample position"""
@property
@abc.abstractmethod
def sampley(self) -> ErrorValue:
"""Vertical sample position"""
@abc.abstractmethod
def motorposition(self, motorname: str) -> float:
"""Position of the motor `motorname`."""
@property
@abc.abstractmethod
def username(self) -> str:
"""Name of the instrument operator"""
@property
@abc.abstractmethod
def project(self) -> str:
"""Project name"""
@property
@abc.abstractmethod
def fsn_emptybeam(self) -> int:
"""File sequence number of the empty beam measurement"""
@property
@abc.abstractmethod
def fsn_absintref(self) -> int:
"""File sequence number of the absolute intensity reference measurement
"""
@property
@abc.abstractmethod
def absintfactor(self) -> ErrorValue:
"""Absolute intensity calibration factor"""
@property
@abc.abstractmethod
def samplex_motor(self) -> Optional[float]:
"""Sample X position, motor reading."""
@property
@abc.abstractmethod
def sampley_motor(self) -> Optional[float]:
"""Sample X position, motor reading."""
@property
@abc.abstractmethod
def sample_category(self) -> str:
"""Sample category"""
|
nilq/baby-python
|
python
|
from PySide2.QtGui import QVector3D
import numpy as np
def validate_nonzero_qvector(value: QVector3D):
if value.x() == 0 and value.y() == 0 and value.z() == 0:
raise ValueError("Vector is zero length")
def get_an_orthogonal_unit_vector(input_vector: QVector3D) -> QVector3D:
"""
Return a unit vector which is orthogonal to the input vector
There are infinite valid solutions, just one is returned
"""
if np.abs(input_vector.z()) < np.abs(input_vector.x()):
vector = QVector3D(input_vector.y(), -input_vector.x(), 0.0)
return vector.normalized()
return QVector3D(0.0, -input_vector.z(), input_vector.y()).normalized()
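# Usage sketch (illustrative, not part of the original module): the branch
# above picks the formula that avoids the input's dominant component, which
# keeps the construction numerically stable for near-axis-aligned vectors.
#   v = QVector3D(1.0, 2.0, 3.0)
#   u = get_an_orthogonal_unit_vector(v)
#   assert abs(QVector3D.dotProduct(u, v)) < 1e-6 and abs(u.length() - 1.0) < 1e-6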
|
nilq/baby-python
|
python
|
'''Given an array nums of size n, return the majority element.
The majority element is the element that appears more than ⌊n / 2⌋ times. You may assume that the majority element always exists in the array.
Example 1:
Input: nums = [3,2,3]
Output: 3
Example 2:
Input: nums = [2,2,1,1,1,2,2]
Output: 2'''
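# The solution below uses the Boyer-Moore majority vote algorithm: keep one
# candidate and a counter; a matching element increments the counter, a
# mismatch decrements it, and when the counter reaches zero the current
# element becomes the new candidate. Because the majority element occurs more
# than n/2 times, it always survives the cancellations: O(n) time, O(1) space.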
class Solution:
def majorityElement(self, nums):
maj=nums[0]
c=1
for i in range(1,len(nums)):
if nums[i]!=maj:
c-=1
else:
c+=1
if c==0:
maj=nums[i]
c=1
return maj
k=Solution()
print(k.majorityElement([2,4,3,3,3,5,4,3,3,3,3,4,4,4,4,4,4,4,4]))
|
nilq/baby-python
|
python
|
"""The Efergy integration."""
from __future__ import annotations
from pyefergy import Efergy, exceptions
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import DEFAULT_NAME, DOMAIN
PLATFORMS = [Platform.SENSOR]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Efergy from a config entry."""
api = Efergy(
entry.data[CONF_API_KEY],
session=async_get_clientsession(hass),
utc_offset=hass.config.time_zone,
currency=hass.config.currency,
)
try:
await api.async_status(get_sids=True)
except (exceptions.ConnectError, exceptions.DataError) as ex:
raise ConfigEntryNotReady(f"Failed to connect to device: {ex}") from ex
except exceptions.InvalidAuth as ex:
raise ConfigEntryAuthFailed(
"API Key is no longer valid. Please reauthenticate"
) from ex
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = api
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class EfergyEntity(Entity):
"""Representation of a Efergy entity."""
_attr_attribution = "Data provided by Efergy"
def __init__(self, api: Efergy, server_unique_id: str) -> None:
"""Initialize an Efergy entity."""
self.api = api
self._attr_device_info = DeviceInfo(
configuration_url="https://engage.efergy.com/user/login",
connections={(dr.CONNECTION_NETWORK_MAC, api.info["mac"])},
identifiers={(DOMAIN, server_unique_id)},
manufacturer=DEFAULT_NAME,
name=DEFAULT_NAME,
model=api.info["type"],
sw_version=api.info["version"],
)
|
nilq/baby-python
|
python
|
# Copyright (c) 2020 Abe Jellinek
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ABE JELLINEK BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import os.path
import sys
import requests
import regex as re
import errno
import gzip
import json
import argparse
import unicodedata
import shutil
import csv
from tqdm import tqdm
from datetime import datetime
from pymarc import Record, Field
URL_BASE = 'https://unpaywall-data-snapshots.s3-us-west-2.amazonaws.com/'
FIELD_NAMES = ['Primary Author', 'Title', 'Year', 'Journal', 'PDF', 'DOI', 'Full JSON']
title_splitter = re.compile(r'([:;\\/\p{Pd},.])')
filters = []
def prompt(question, default=True):
choices = '[Y/n]' if default else '[y/N]'
default_choice = 'Y' if default else 'N'
user_entered = input(f'{question} {choices} ').strip().lower()
while user_entered and user_entered != 'y' and user_entered != 'n':
user_entered = input(' ' * max(len(question) - 3, 0) + \
f' ?? {choices} (or press enter for {default_choice}) ').strip().lower()
if not user_entered:
return default
elif user_entered == 'y':
return True
else:
return False
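# latest_dataset() below scrapes the S3 bucket listing at URL_BASE with a
# regex; the greedy '(?s:.*)' prefix makes it capture the *last*
# <Key>/<LastModified>/<Size> triple in the XML, i.e. the newest snapshot,
# and it implicitly returns None when nothing matches.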
def latest_dataset():
r = requests.get(URL_BASE)
manifest = r.text
match = re.search('(?s:.*)<Key>([^<]+)</Key><LastModified>([^<]+)</LastModified><ETag>[^<]+</ETag><Size>([0-9]+)</Size>', \
manifest)
if not match:
return
path = URL_BASE + match.group(1)
last_modified = datetime.strptime(match.group(2), '%Y-%m-%dT%H:%M:%S.%fZ')
size = int(match.group(3))
return path, last_modified, size
def run_download(args):
local_data_path = args.path
path, last_modified, size = latest_dataset()
if path:
size_in_gb = size / 1073741824
print(f'Dataset found. Last update: {last_modified:%d %b %Y}.')
if prompt(f'Download this {size_in_gb:1.1f} GB dataset?', default=True):
if os.path.isfile(local_data_path) \
and not prompt('Output file exists! Replace?', default=False):
sys.exit(0)
try:
os.makedirs(os.path.dirname(local_data_path))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with requests.get(path, stream=True) as response:
if response.ok:
with open(local_data_path, 'wb') as handle:
with tqdm(
unit='B', unit_scale=True, unit_divisor=1024, miniters=1,
total=size, smoothing=0
) as pbar:
for chunk in response.iter_content(chunk_size=8192):
handle.write(chunk)
pbar.update(len(chunk))
print('Done! Proceeding...')
else:
print(f'ERROR: Download failed with status code {response.status_code}.', file=sys.stderr)
sys.exit(1)
else:
sys.exit(0)
else:
print('ERROR: No dataset found online.', file=sys.stderr)
sys.exit(99)
def format_author(author, reverse=True):
if 'given' in author:
if reverse:
return f"{author.get('family', '')}, {author.get('given')}"
else:
return f"{author['given']} {author.get('family', '')}"
else:
return f"{author.get('family', 'Unknown')}"
def format_authors(authors):
if not authors:
return ''
first_author = format_author(authors[0], reverse=True)
rest = [format_author(author, reverse=False) for author in authors[1:]]
if len(rest) == 0:
return first_author
elif len(rest) == 1:
return f'{first_author} and {rest[0]}'
else:
return f"{first_author}, {', '.join(rest[:-1])}, and {rest[-1]}"
def to_csv(obj, json):
return {
'Primary Author': format_author(obj['z_authors'][0], reverse=True) \
if obj['z_authors'] else 'Unknown',
'Title': obj['title'],
'Year': obj['year'],
'Journal': obj['journal_name'],
'PDF': obj['best_oa_location']['url'],
'DOI': obj['doi_url'],
'Full JSON': json
}
def to_marc(obj):
primary_author = format_author(obj['z_authors'][0], reverse=True) if obj['z_authors'] else None
split_title = [part.strip() for part in title_splitter.split(obj['title'])]
if len(split_title) > 2:
primary_title = f'{split_title[0]} {split_title[1]}'
remainder_of_title = f'{" ".join(split_title[2:])} /'
else:
primary_title = f'{" ".join(split_title)} /'
remainder_of_title = ''
record = Record()
record.leader.type_of_record = 'a'
record.leader.coding_scheme = 'a'
    record.leader.bibliographic_level = 'm'
    record.leader.cataloging_form = 'a'
# record.add_field(Field(
# tag = '041',
# indicators = ['#', '#'],
# subfields = [
# 'a', 'Eng'
# ]))
if primary_author:
record.add_field(Field(
tag = '100',
indicators = ['0', ' '],
subfields = [
'a', primary_author
]))
if remainder_of_title:
record.add_field(Field(
tag = '245',
indicators = ['0', '0'],
subfields = [
'a', primary_title,
'b', remainder_of_title,
'c', format_authors(obj['z_authors'])
]))
else:
record.add_field(Field(
tag = '245',
indicators = ['0', '0'],
subfields = [
'a', f"{obj['title']} /",
'c', format_authors(obj['z_authors'])
]))
record.add_field(Field(
tag = '260',
indicators = [' ', ' '],
subfields = [
'b', obj['publisher'],
'c', str(obj['year'])
]))
record.add_field(Field(
tag = '500',
indicators = [' ', ' '],
subfields = [
'a', f"Article from {obj['journal_name']}.",
]))
record.add_field(Field(
tag = '856',
indicators = ['4', '0'],
subfields = [
'u', obj['best_oa_location']['url'],
'y', 'View article as PDF'
]))
record.add_field(Field(
tag = '856',
indicators = [' ', ' '],
subfields = [
'u', obj['doi_url'],
'y', 'DOI'
]))
# print(str(record))
return record.as_marc()
def strip_diacritics(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def stream_to_csv(stream):
for line in stream:
obj = json.loads(line)
oa_location = obj['best_oa_location']
if oa_location is None or obj['title'] is None:
continue
title_normalized = strip_diacritics(obj['title']).lower()
if any(pattern.findall(title_normalized) for pattern in filters):
yield to_csv(obj, line)
def stream_to_marc(stream):
for row in stream:
obj = json.loads(row['Full JSON'])
yield to_marc(obj)
def run_filter(args):
    # fall back to the default filter only when the user supplied none;
    # with action='append', a non-None default would always be included
    for filename in (args.pattern or ['filters/jordan']):
with open(filename, 'r') as file:
filters.append(re.compile(file.read().strip()))
if args.output_file and os.path.isfile(args.output_file):
if not prompt('Output file exists! Overwrite?', default=False):
sys.exit(1)
downloaded = os.path.isfile(args.dataset)
if not downloaded:
python_command = os.path.basename(sys.executable)
print('ERROR: No downloaded dataset found. Can be downloaded with:', file=sys.stderr)
print(f' {python_command} {sys.argv[0]} dl', file=sys.stderr)
sys.exit(1)
with gzip.open(args.dataset, 'rt') as stream:
if not args.output_file or args.output_file == '-':
writer = csv.DictWriter(sys.stdout, fieldnames=FIELD_NAMES)
writer.writeheader()
for line in stream_to_csv(stream):
writer.writerow(line)
else:
with open(args.output_file, 'w', encoding='utf-8') as out:
writer = csv.DictWriter(out, fieldnames=FIELD_NAMES)
writer.writeheader()
# total here is just an estimate:
for line in stream_to_csv(tqdm(stream, unit=' articles', total=114164038, smoothing=0)):
writer.writerow(line)
def run_marc(args):
with open(args.csv) as csv_file:
if not args.output_file or args.output_file == '-':
reader = csv.DictReader(csv_file, fieldnames=FIELD_NAMES)
next(reader) # read through header
for marc in stream_to_marc(reader):
sys.stdout.buffer.write(marc)
else:
with open(args.output_file, 'wb') as out:
reader = csv.DictReader(csv_file, fieldnames=FIELD_NAMES)
next(reader) # read through header
for marc in stream_to_marc(reader):
out.write(marc)
def main():
parser = argparse.ArgumentParser(description='Filter Unpaywall data for library use.')
subparsers = parser.add_subparsers()
parser_dl = subparsers.add_parser('download', aliases=['dl'])
parser_dl.add_argument('-o', dest='path', default='data/unpaywall_snapshot.jsonl.gz',
help='store in the specified location [optional, default location recommended]')
parser_dl.set_defaults(func=run_download)
parser_filter = subparsers.add_parser('filter')
    parser_filter.add_argument('-p', action='append', dest='pattern', default=None,
        help='specify path to a file containing paper title regex (repeat for OR)')
parser_filter.add_argument('-d', dest='dataset', default='data/unpaywall_snapshot.jsonl.gz',
help='specify path to the Unpaywall dataset in GZIP format')
parser_filter.add_argument('-o', dest='output_file',
help='output to specified CSV file [optional, default: stdout]')
parser_filter.set_defaults(func=run_filter)
parser_marc = subparsers.add_parser('marc')
parser_marc.add_argument('csv', help='input CSV file to process')
parser_marc.add_argument('-o', dest='output_file',
help='output to specified MARC file [optional, default: stdout]')
parser_marc.set_defaults(func=run_marc)
if len(sys.argv) < 2:
parser.print_usage()
sys.exit(1)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from . import display
from . import field_utils
from . import hunt_ai
from . import player
from . import probabilistic_ai
from . import random_ai
|
nilq/baby-python
|
python
|
# coding=utf-8
import humanize
import logging
import re
import times
from markdown import markdown
from path import path
from typogrify import Typogrify
from typogrify.templatetags import jinja2_filters
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
logger = logging.getLogger(__name__)
def format_datetime(value, format_string='%Y-%m-%d'):
return value.strftime(format_string)
def markdown_filter(value, typogrify=True, extensions=('extra', 'codehilite')):
"""
A smart wrapper around the ``markdown`` and ``typogrify`` functions that automatically removes leading
whitespace before every line. This is necessary because Markdown is whitespace-sensitive. Consider some Markdown
content in a template that looks like this:
    .. code-block:: html+jinja
<article>
{% filter markdown %}
## A Heading
Some content here.
Code goes here.
More lines of code
And more.
Closing thoughts
{% endfilter %}
</article>
In this example, a typical Markdown filter would see the leading whitespace before the first heading and assume
it was a code block, which would then cause the entire Markdown document to be rendered incorrectly. You may have
a document with spacing like this because your text editor automatically 'pretty-prints' the markup,
including the content within the filter tag.
This filter automatically removes the leading whitespace - leaving code block and other expected offsets in place
of course - so that rendering occurs correctly regardless of the nested spacing of the source document.
"""
# Determine how many leading spaces there are, then remove that number from the beginning of each line.
match = re.match(r'(\n*)(\s*)', value)
s, e = match.span(2)
pattern = re.compile(r'^ {%s}' % (e - s), # use ^ in the pattern so mid-string matches won't be removed
flags=re.MULTILINE) # use multi-line mode so ^ will match the start of each line
output = pattern.sub(u'', value)
if typogrify:
return jinja2_filters.typogrify(markdown(output, extensions=extensions))
else:
return markdown(output, extensions=extensions)
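# Sketch of the dedent behaviour described in the docstring (hypothetical
# input): for a value like '\n    ## A Heading\n    Some text.\n        code\n'
# the regex measures the 4-space indent that follows any leading newlines and
# strips exactly that much from the start of every line, so the heading is
# rendered as a heading while the 8-space line keeps a 4-space offset and
# still becomes a Markdown code block.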
def localtime(value, tz=None):
from engineer.conf import settings
if tz is None:
tz = settings.POST_TIMEZONE
return times.to_local(value, tz)
def naturaltime(value):
from engineer.conf import settings
server_time = localtime(value, settings.SERVER_TIMEZONE).replace(tzinfo=None)
friendly = humanize.naturaltime(server_time)
return friendly
def compress(value):
from engineer.conf import settings
if not settings.COMPRESSOR_ENABLED:
return value
else: # COMPRESSOR_ENABLED == True
import html5lib
def _min_js_slim(js_string):
# NOTE: The slimit filter seems to break some scripts. I'm not sure why. I'm leaving this code in for
# posterity, but it's not functional right now and shouldn't be used.
from slimit import minify
return minify(js_string)
doc = html5lib.parseFragment(value.strip())
to_compress = [l for l in doc.childNodes if
l.name in ('link', 'script')]
for item in to_compress:
if item.name == 'link':
src = item.attributes['href']
compression_type = 'css'
elif item.name == 'script':
if 'src' in item.attributes:
src = item.attributes['src']
compression_type = 'js'
else: # inline script
continue
# TODO: Inline script minification.
#has_inline = True
#if len(item.childNodes) > 1:
# raise Exception("For some reason the inline script node has more than one child node.")
#else:
# item.childNodes[0].value = _min_js(item.childNodes[0].value)
else:
raise Exception("Hmmm, wasn't expecting a '%s' here." % item.name)
if src.startswith(settings.HOME_URL):
# trim the HOME_URL since it won't be part of the local path to the file
src = src[len(settings.HOME_URL):]
elif src.startswith('/'):
# trim the leading '/' from the src so we can combine it with the OUTPUT_CACHE_DIR to get a path
src = src[1:]
file = path(settings.OUTPUT_CACHE_DIR / src).abspath()
if file.ext[1:] in settings.COMPRESSOR_FILE_EXTENSIONS:
settings.COMPRESS_FILE_LIST.add((file, compression_type))
# TODO: Inline script minification.
# if has_inline: # Handle inline script
# # Since we have inline script, we need to serialize the minified content into a string and return it
# walker = treewalkers.getTreeWalker('simpletree')
# stream = walker(doc)
# s = serializer.htmlserializer.HTMLSerializer(omit_optional_tags=False,
# #strip_whitespace=True,
# quote_attr_values=True)
# generator = s.serialize(stream)
# output = ''
# for tag in generator:
# output += tag
return value
def typogrify_no_widont(value):
value = Typogrify.amp(value)
value = Typogrify.smartypants(value)
value = Typogrify.caps(value)
value = Typogrify.initial_quotes(value)
return value
|
nilq/baby-python
|
python
|
from conan.packager import ConanMultiPackager
import copy
import platform
if __name__ == "__main__":
builder = ConanMultiPackager(archs = ["x86_64"])
builder.add_common_builds(pure_c=False)
items = []
for item in builder.items:
if item.settings["compiler"] == "Visual Studio":
if item.settings["compiler.runtime"] == "MT" or item.settings["compiler.runtime"] == "MTd":
# Ignore MT runtime
continue
# Build static only
if item.options["vtk:shared"]:
continue
new_options = copy.copy(item.options)
new_options["vtk:qt"] = True
new_options["vtk:ioxml"] = True
items.append([item.settings, new_options, item.env_vars, item.build_requires])
new_options = copy.copy(item.options)
new_options["vtk:minimal"] = True
new_options["vtk:ioxml"] = True
items.append([item.settings, new_options, item.env_vars, item.build_requires])
builder.items = items
builder.run()
|
nilq/baby-python
|
python
|
"""Define base class for a course guide argument."""
from args import _RawInputValue, _QueryKVPairs, _InputValue, _InputValues, \
_QueryValues, _ARG_TYPE_TO_QUERY_KEY
from args.meta_arg import MetaArg
from typing import final, Optional
class Arg(metaclass=MetaArg):
"""Base class for a Course Guide argument."""
@final
def __init__(self, input: Optional[_RawInputValue] = None) -> None:
self._kvpairs: _QueryKVPairs = [] if not input else \
type(self)._make_kvpairs(type(self)._translate_input_values(
type(self)._fix_input_values(type(self)._prepare_input(
input
))
))
@final
@property
def kvpairs(self) -> _QueryKVPairs:
return self._kvpairs
@final
def url_piece(self) -> str:
if type(self)._MANDATORY:
assert self._is_set()
if not self._is_set():
return ''
piece = ''
for k, vals in self._kvpairs:
piece += f'{k}='
for v in vals:
piece += f'{v}+'
piece = piece[:-1] + '&' # Remove last +, add &
return piece[:-1] # Remove last &
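    # Illustrative output shape (hypothetical key/values, not from the source):
    # kvpairs {('dept', ('CS', 'MATH'))} serialises to 'dept=CS+MATH', and
    # multiple pairs are joined with '&', giving the 'key=v1+v2&key2=v3' form.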
@final
def _is_set(self) -> bool:
return bool(self._kvpairs)
@classmethod
def _valid_input_value(cls, input_value: _InputValue) -> bool:
return cls._INPUT_VAL_TO_QUERY_VAL is None or \
input_value in cls._INPUT_VAL_TO_QUERY_VAL
@classmethod
def _prepare_input(cls, input: _RawInputValue) -> _InputValues:
return input.split(', ')
@classmethod
def _fix_input_values(cls, input_values: _InputValues) -> _InputValues:
valids: _InputValues = []
invalids: _InputValues = []
for val in input_values:
if cls._valid_input_value(val):
valids.append(val)
else:
invalids.append(val)
if invalids:
cls._print_invalid_input_values_msg(invalids)
return valids
@classmethod
def _translate_input_values(cls, input_values: _InputValues
) -> _QueryValues:
if not cls._INPUT_VAL_TO_QUERY_VAL:
return input_values
return tuple([cls._INPUT_VAL_TO_QUERY_VAL[val] for val in input_values])
@classmethod
def _make_kvpairs(cls, query_values: _QueryValues) -> _QueryKVPairs:
if not query_values:
return set()
# Most arguments map to query key and don't use + for values
# key=v1&key=v2&...
key = _ARG_TYPE_TO_QUERY_KEY[cls._ARGTYPE]
return {(key, (v,)) for v in query_values}
@classmethod
def _print_invalid_input_values_msg(cls, invalids: _InputValues) -> None:
if len(invalids) == 1:
print(f"Not using invalid {cls._INPUT_KEY}: '{invalids[0]}'")
else:
print(f'Not using {len(invalids)} invalid {cls._INPUT_KEY}(s):')
for inv in invalids:
print(f"\t'{inv}'")
cls._print_valid_input_values()
@classmethod
def _print_valid_input_values(cls) -> None:
if not cls._INPUT_VAL_TO_QUERY_VAL:
return
print(f'Valid {cls._INPUT_KEY}(s):')
for input_val in cls._INPUT_VAL_TO_QUERY_VAL:
print(f'\t{input_val}')
|
nilq/baby-python
|
python
|
import sys
import os
import numpy as np
import h5py
sys.path.append('./utils_motion')
from Animation import Animation, positions_global
from Quaternions import Quaternions
from BVH import save
from skeleton import Skeleton
import argparse
offsets = np.array([
[ 0. , 0. , 0. ],
[-132.948591, 0. , 0. ],
[ 0. , -442.894612, 0. ],
[ 0. , -454.206447, 0. ],
[ 0. , 0. , 162.767078],
[ 0. , 0. , 74.999437],
[ 132.948826, 0. , 0. ],
[ 0. , -442.894413, 0. ],
[ 0. , -454.20659 , 0. ],
[ 0. , 0. , 162.767426],
[ 0. , 0. , 74.999948],
[ 0. , 0.1 , 0. ],
[ 0. , 233.383263, 0. ],
[ 0. , 257.077681, 0. ],
[ 0. , 121.134938, 0. ],
[ 0. , 115.002227, 0. ],
[ 0. , 257.077681, 0. ],
[ 0. , 151.034226, 0. ],
[ 0. , 278.882773, 0. ],
[ 0. , 251.733451, 0. ],
[ 0. , 0. , 0. ],
[ 0. , 0. , 99.999627],
[ 0. , 100.000188, 0. ],
[ 0. , 0. , 0. ],
[ 0. , 257.077681, 0. ],
[ 0. , 151.031437, 0. ],
[ 0. , 278.892924, 0. ],
[ 0. , 251.72868 , 0. ],
[ 0. , 0. , 0. ],
[ 0. , 0. , 99.999888],
[ 0. , 137.499922, 0. ],
[ 0. , 0. , 0. ]
], dtype='float64') * 0.01
parents = np.array([-1, 0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12,
16, 17, 18, 19, 20, 19, 22, 12, 24, 25, 26, 27, 28, 27, 30], dtype='int64')
joints_left = np.array([1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31], dtype='int64')
joints_right = np.array([6, 7, 8, 9, 10, 16, 17, 18, 19, 20, 21, 22, 23], dtype='int64')
orients = Quaternions.id(1)
orients_final = np.array([[1,0,0,0]]).repeat(len(offsets), axis=0)
orients.qs = np.append(orients.qs, orients_final, axis=0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bvh_dir',
type=str,
default='./pretrained/output/recon/bvh')
parser.add_argument('--hdf5_path',
type=str,
default='./pretrained/output/recon/m_recon.hdf5')
args = parser.parse_args()
file_dir = args.bvh_dir
for jj in range(60): # # of test motions: 60
with h5py.File(args.hdf5_path, 'r') as h5f:
rotations = h5f['batch{0}'.format(jj + 1)][:] # (fnum, n_joint, 4)
rotations = rotations[:-10] # drop the last few frames
fnum = rotations.shape[0]
positions = offsets[np.newaxis].repeat(fnum, axis=0)
rotations_Quat = Quaternions(rotations)
anim = Animation(rotations_Quat, positions, orients, offsets, parents)
xyz = positions_global(anim)
height_offset = np.min(xyz[:, :, 1]) # Min height
positions[:, :, 1] -= height_offset
anim.positions = positions
filename = 'batch{0}.bvh'.format(jj+1)
filepath = os.path.join(file_dir, filename)
try:
if not(os.path.isdir(file_dir)):
print("Creating directory: {}".format(file_dir))
os.makedirs(file_dir)
except OSError:
pass
save(filepath, anim, frametime=1.0/24.0)
|
nilq/baby-python
|
python
|
from nuaal.Models.BaseModels import BaseModel, DeviceBaseModel
from nuaal.connections.api.apic_em.ApicEmBase import ApicEmBase
from nuaal.utils import Filter
import copy
class ApicEmDeviceModel(DeviceBaseModel):
"""
"""
def __init__(self, apic=None, object_id=None, filter=None, DEBUG=False):
"""
        :param apic: ApicEmBase connection instance; a new one is created if not provided
        :param object_id: APIC-EM object ID of the network device
        :param filter: Filter instance used to look up the device when object_id is not given
        :param DEBUG: enables debug-level logging
"""
super(ApicEmDeviceModel, self).__init__(name="ApicEmDeviceModel", DEBUG=DEBUG)
self.apic = apic if isinstance(apic, ApicEmBase) else ApicEmBase()
self.filter = filter
self.apic._initialize()
self.apic_object_id = object_id
self._initialize()
def _initialize(self):
"""
:return:
"""
if self.apic_object_id is None:
if "id" in self.filter.required.keys():
self.apic_object_id = self.filter.required["id"]
else:
self.logger.debug(msg="No apic_object_id provided, trying to match based on filter.")
try:
response = self.apic.get(path="/network-device")
response = self.filter.universal_cleanup(data=response)
if len(response) == 1:
self.logger.debug(msg="Exactly one object matched query. apic_object_id: '{}'".format(response[0]["id"]))
self.apic_object_id = response[0]["id"]
else:
self.logger.error(msg="Multiple ({}) APIC-EM objects match filter query. Please provide more specific query or enter object_id manually.".format(len(response)))
except Exception as e:
self.logger.critical(msg="Unhandled Exception occurred while trying to initialize. Exception: {}".format(repr(e)))
response = self.apic.get(path="/network-device/{}".format(self.apic_object_id))
print(response)
self.device_info["mgmtIpAddress"] = response["managementIpAddress"]
self.device_info["hostname"] = response["hostname"]
self.device_info["vendor"] = "Cisco"
self.device_info["platform"] = response["platformId"]
self.device_info["swVersion"] = response["softwareVersion"]
self.device_info["uptime"] = response["upTime"]
def get_interfaces(self):
"""
:return:
"""
if self.apic_object_id is None:
self.logger.error(msg="Cannot query APIC-EM for interfaces, no device ID found.")
return {}
response = self.apic.get(path="/interface/network-device/{}".format(self.apic_object_id))
for interface in response:
print(interface)
name = interface["portName"]
self.interfaces[name] = copy.deepcopy(self.interface_model)
self.interfaces[name]["description"] = interface["description"],
self.interfaces[name]["interfaceType"] = interface["interfaceType"],
self.interfaces[name]["className"] = interface["className"],
self.interfaces[name]["status"] = interface["status"],
self.interfaces[name]["macAddress"] = interface["macAddress"].upper(),
self.interfaces[name]["adminStatus"] = interface["adminStatus"],
self.interfaces[name]["speed"] = interface["speed"],
self.interfaces[name]["portName"] = interface["portName"],
self.interfaces[name]["untaggedVlanId"] = interface["nativeVlanId"],
self.interfaces[name]["taggedVlanIds"] = interface["vlanId"],
self.interfaces[name]["duplex"] = interface["duplex"],
self.interfaces[name]["portMode"] = interface["portMode"],
self.interfaces[name]["portType"] = interface["portType"],
self.interfaces[name]["ipv4Mask"] = interface["ipv4Mask"],
self.interfaces[name]["ipv4Address"] = interface["ipv4Address"],
self.interfaces[name]["mediaType"] = interface["mediaType"],
return self.interfaces
def get_vlans(self):
"""
:return:
"""
        raise NotImplementedError  # NotImplemented is a constant, not an exception
if self.apic_object_id is None:
self.logger.error(msg="Cannot query APIC-EM for interfaces, no device ID found.")
return {}
response = self.apic.get(path="/network-device/{}/vlan".format(self.apic_object_id))
for vlan in response:
print(response)
def get_inventory(self):
"""
:return:
"""
if self.apic_object_id is None:
self.logger.error(msg="Cannot query APIC-EM for interfaces, no device ID found.")
return {}
response = self.apic.get(path="/network-device/module", params={"deviceId": self.apic_object_id})
for raw_module in response:
print(raw_module)
module = copy.deepcopy(self.inventory_model)
module["name"] = raw_module["name"]
module["description"] = raw_module["description"]
module["partNumber"] = raw_module["partNumber"]
module["serialNumber"] = raw_module["serialNumber"]
module["version"] = raw_module["assemblyRevision"]
self.inventory.append(module)
return self.inventory
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import glob
import os
import signal
import subprocess
import sys
import time
from multiprocessing import Process
import yaml
MODULE_PATH = os.path.abspath(os.path.join("."))
if MODULE_PATH not in sys.path:
sys.path.append(MODULE_PATH)
from xt.benchmark.tools.evaluate_xt import get_bm_args_from_config, read_train_event_id
CI_WORKSPACE = "scripts/ci_tmp_yaml"
def rm_files(path):
for i in os.listdir(path):
path_file = os.path.join(path, i)
if os.path.isfile(path_file):
os.remove(path_file)
def write_conf_file(config_folder, config):
with open(config_folder, "w") as file:
yaml.dump(config, file)
def check_sys_argv(argv):
"""fetch ci parameters."""
if len(argv) != 2:
print("input argv err")
exit(1)
config_file = argv[1]
with open(config_file) as conf:
        config_list = yaml.safe_load(conf)
end_tag = config_list.get("end_flag")
ci_task = config_list.get("task")
save_steps = config_list.get("model_save_step", 100)
config_dir = config_list.get("config_dir", "examples/default_cases")
single_flag = config_list.get("single_case", None)
print("##################################")
print("TEST CONFIG FOLDER IS ", config_dir)
print("STEP FOR EACH TEST IS ", save_steps)
print("##################################")
if ci_task == "train":
node_array = config_list.get("node_config")
elif ci_task == "eval":
node_array = config_list.get("test_node_config")
else:
node_array = None
print("invalid test type: {}".format(ci_task))
exit(1)
return node_array, end_tag, ci_task, save_steps, config_dir, single_flag
def assemble_ci_config(target_yaml, ci_task, node_list, save_steps):
with open(target_yaml) as config_file:
        config = yaml.safe_load(config_file)
alg_config = config["alg_para"].get("alg_config")
if alg_config is None:
alg_save_steps = {"alg_config": {"save_model_step": save_steps}}
config["alg_para"].update(alg_save_steps)
else:
config["alg_para"]["alg_config"].setdefault("save_model_step", save_steps)
if ci_task == "train":
for k in config.get("node_config"):
config["node_config"].pop()
for i in range(len(node_list)):
config["node_config"].append(node_list[i])
elif ci_task == "eval":
config["test_node_config"].pop()
config["test_node_config"].append(node_list[0])
return config
def run_test(tmp_conf, ci_task):
process = subprocess.Popen(
["setsid", "python3", "xt/main.py", "--config_file", tmp_conf, "--task", ci_task],
# stdout=subprocess.PIPE,
)
return process
def check_test(flag, ci_task, model_path, tmp_file):
if os.path.isdir(model_path) is False:
previous_length = 0
else:
files_model = os.listdir(model_path)
previous_length = len(files_model)
start = time.time()
test_process = run_test(tmp_file, ci_task)
normal_return_code = (0, -9, -15)
while True:
returncode = test_process.poll()
# print("returncode:", returncode)
if returncode is not None and returncode not in normal_return_code:
print("get a err on test", tmp_file)
if flag:
exit(1)
else:
break
if ci_task == "train":
time.sleep(2)
try:
file_module = os.listdir(model_path)
files_num = len(file_module)
except Exception:
files_num = 0
print(files_num, previous_length, tmp_file, model_path)
if previous_length < files_num:
if returncode is None:
close_test(test_process)
elif returncode in normal_return_code:
rm_files(model_path)
break
elif ci_task == "evaluate":
end = time.time() - start
if end > 20:
if returncode is None:
close_test(test_process)
elif returncode == 0:
break
else:
print("test failed")
exit(1)
def close_test(process):
process.send_signal(signal.SIGINT)
# process.kill()
# process.terminate()
print("sent close signal to work process")
time.sleep(1)
def parallel_case_check(processes):
while True:
exitcodes = []
for process in processes:
exitcodes.append(process.exitcode)
if process.exitcode is not None and process.exitcode != 0:
return 1
exitcode_state = True
for exitcode in exitcodes:
if exitcode is None:
exitcode_state = False
if exitcode_state:
return 0
time.sleep(0.1)
def main():
node_list, end_flag, ci_task, save_steps, conf_dir, sgf = check_sys_argv(sys.argv)
if not os.path.isdir(CI_WORKSPACE):
os.makedirs(CI_WORKSPACE)
_candidates = glob.glob("{}/*.yaml".format(conf_dir))
target_yaml = [item for item in _candidates if item[0] != "."]
print("CI start parse yaml: \n", target_yaml)
if len(target_yaml) < 1:
print("exit with config folder is empty")
exit(1)
# go through all the config files
for one_yaml in target_yaml:
# print(end_flag)
if sgf and one_yaml != sgf:
continue
print("processing: {}".format(one_yaml))
config_tmp = assemble_ci_config(one_yaml, ci_task, node_list, save_steps)
processes_parallel = []
# go through all the node in node_config
for node_n in range(len(node_list)):
tmp_name = (
os.path.split(one_yaml)[-1]
+ "_node_"
+ str(len(config_tmp.get("node_config")))
)
if node_n != 0:
config_tmp["node_config"].pop()
# try environment number in 1 and 2
for env_n in range(2):
config_tmp["env_num"] = env_n + 1
tmp_name += "_e-" + str(config_tmp.get("env_num"))
# ---------
bm_id = config_tmp.get("benchmark", dict()).get("id")
if not bm_id:
_str_list = list()
_str_list.append(config_tmp.get("agent_para").get("agent_name"))
_str_list.append(config_tmp.get("env_para").get("env_name"))
_str_list.append(config_tmp.get("env_para").get("env_info").get("name"))
bm_id = "+".join(_str_list)
bm_id = "{}+e{}".format(bm_id, env_n)
if not config_tmp.get("benchmark"):
config_tmp.update({"benchmark": {"id": bm_id}})
else:
config_tmp["benchmark"].update({"id": bm_id})
tmp_yaml_name = os.path.join(CI_WORKSPACE, tmp_name)
write_conf_file(tmp_yaml_name, config_tmp)
from xt.benchmark.tools.evaluate_xt import (
get_train_model_path_from_config,
)
model_path = get_train_model_path_from_config(config_tmp)
print("model save path: ", model_path)
p = Process(
target=check_test,
args=(end_flag, ci_task, model_path, tmp_yaml_name),
)
p.start()
processes_parallel.append(p)
time.sleep(0.4)
end_check = parallel_case_check(processes_parallel)
time.sleep(1)
if end_check == 1:
print("test failed")
exit(1)
rm_files(CI_WORKSPACE)
print("Normal train passed")
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
# Copyright (c) 2021, Bhavuk Sharma
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile.widget import base
import os
class active(base.ThreadPoolText):
"""
This widget will show an indicator on satusbar if Camera or Microphone is being used by an application on
your machine.
This is similar like what is being offered in iOS 14 and Android 12, firefox also has a similar feature.
WARNING: IF update_interval IS HIGH THAN IT WILL NOT BE ABLE TO DETECT IF CAMERA OR MIC IS BEING USED IN BETWEEN
THAT INTERVAL, SO IT IS BETTER TO USE SMALL VALUE FOR update_interval (DEFAULT IS SET TO 1).
"""
defaults = [
(
"update_interval",
1,
"Update interval in seconds, if none, the "
"widget updates whenever it's done'.",
),
("format", "{mic_str} {cam_str}", "Display format for output"),
("cam_device", "/dev/video0", "Path to camera device"),
("mic_device", "/dev/snd/pcmC0D0c", "Path to Microphone device"),
("cam_active", "📸", "Indication when camera active"),
("cam_inactive", "", "Indication when camera is inactive"),
("mic_active", "📢", "Indication when Microphone active"),
("mic_inactive", "", "Indication when mic is inactive"),
]
def __init__(self, **config):
super().__init__("", **config)
self.add_defaults(active.defaults)
def poll(self):
mic = os.system(f"fuser {self.mic_device}")
camera = os.system(f"fuser {self.cam_device}")
vals = dict(
mic_str=self.mic_inactive if mic == 256 else self.mic_active,
cam_str=self.cam_inactive if camera == 256 else self.cam_active,
)
return self.format.format(**vals)
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.7 on 2021-11-23 16:39
import ckeditor.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('title', models.TextField(verbose_name='Title')),
('author', models.CharField(max_length=50, verbose_name='Author')),
('description', models.TextField(verbose_name='Description')),
('main_desc', ckeditor.fields.RichTextField(blank=True, null=True)),
],
options={
'verbose_name': 'Blog',
'verbose_name_plural': 'Blogs',
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('title', models.TextField(verbose_name='Title')),
],
options={
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('author', models.CharField(blank=True, max_length=50, null=True, verbose_name='Author')),
('message', models.TextField(verbose_name='Message')),
('blog', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='blogs', to='blog.blog', verbose_name='Blog')),
('replied', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.comment', verbose_name='Replied')),
],
options={
'verbose_name': 'Comment',
'verbose_name_plural': 'Comments',
},
),
]
|
nilq/baby-python
|
python
|
from django import forms
from properties.models import BookingRequest
class BookingRequestForm(forms.ModelForm):
class Meta:
model = BookingRequest
fields = ['comment', 'mobile_phone']
widgets = {
'comment': forms.Textarea(attrs={
'class': 'contact-form__textarea mb-25', 'cols': 10,
'placeholder': 'Enter your message',
'required': True}),
'mobile_phone': forms.TextInput(attrs={
'class': 'form-control filter-input',
'placeholder': 'Enter your phone number',
'required': True})
}
class ContactForm(forms.Form):
name = forms.CharField(
required=True,
widget=forms.TextInput(attrs={
'class': 'form-control',
'id': 'fname',
'placeholder': 'Name'
}))
email = forms.EmailField(
required=True,
widget=forms.EmailInput(attrs={
'class': 'form-control',
'id': 'email_address',
'placeholder': 'Email address'
}))
message = forms.CharField(
widget=forms.Textarea(attrs={
'class': 'form-control',
'id': 'comment',
'placeholder': 'Your Message'
}))
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
import subprocess
import json
import argparse
import sys
import logging
def main():
parser = argparse.ArgumentParser()
parser.description = u'Compile test a sketch for all available boards'
parser.add_argument(u'-s', u'--sketch', dest=u'sketch',
required=True, help=u'Path to sketch')
args = parser.parse_args()
test_all_boards(args.sketch)
def test_all_boards(sketch):
logging.basicConfig(level=logging.INFO,
format='%(asctime)s [%(levelname)s] %(message)s')
log = logging.getLogger('arduino-compile-test')
process = subprocess.run("arduino-cli board listall --format json".split(),
stdout=subprocess.PIPE)
board_list_json = process.stdout.decode('utf-8')
board_list = json.loads(board_list_json)
test_list = ["arduino:samd:mkrzero", "arduino:avr:mega",
"arduino:avr:nano", "arduino:avr:uno",
"esp32:esp32:esp32", "esp8266:esp8266:generic"]
for board in test_list:
if board in (b['FQBN'] for b in board_list['boards']):
log.info('Test compilation for board {}'.format(board))
command = 'arduino-cli compile --libraries="." --warnings all'\
' --fqbn {board} {sketch}'.format(board=board,
sketch=sketch)
process = subprocess.run(command.split(), stdout=subprocess.PIPE)
if process.returncode:
log.error(process.stdout.decode('utf-8'))
sys.exit(process.returncode)
else:
log.error('Board not installed: {}'.format(board))
sys.exit(-1)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
#Chris Melville and Jake Martens
'''
booksdatasourcetest.py
Jeff Ondich, 24 September 2021
'''
import booksdatasource
import unittest
class BooksDataSourceTester(unittest.TestCase):
def setUp(self):
self.data_source_long = booksdatasource.BooksDataSource('books1.csv')
self.data_source_short = booksdatasource.BooksDataSource('books2.csv')
def tearDown(self):
pass
def test_unique_author(self):
authors = self.data_source_long.authors('Pratchett')
self.assertTrue(len(authors) == 1)
self.assertTrue(authors[0].get_author_name() == 'Terry Pratchett')
def test_authors_none(self):
authors = self.data_source_short.authors(None)
self.assertTrue(len(authors) == 3)
self.assertTrue(authors[0].get_author_name() == 'Ann Brontë')
self.assertTrue(authors[1].get_author_name() == 'Charlotte Brontë')
self.assertTrue(authors[2].get_author_name() == 'Connie Willis')
def test_author_sort(self):
authors = self.data_source_short.authors('Brontë')
self.assertTrue(len(authors) == 2)
self.assertTrue(authors[0].get_author_name() == 'Ann Brontë')
self.assertTrue(authors[1].get_author_name() == 'Charlotte Brontë')
def test_case_insensitivity(self):
authors = self.data_source_short.authors('willis')
self.assertTrue(len(authors) == 1)
self.assertTrue(authors[0].get_author_name() == 'Connie Willis')
def test_author_not_on_list(self):
authors = self.data_source_short.authors('Agatha')
self.assertTrue(len(authors) == 0)
def test_unique_book(self):
books = self.data_source_long.books('Sula')
self.assertTrue(len(books) == 1)
self.assertTrue(books[0].get_title() == 'Sula')
def test_book_not_in_file(self):
books = self.data_source_long.books('Cat')
self.assertTrue(len(books) == 0)
def test_books_none(self):
books = self.data_source_short.books(None)
self.assertTrue(len(books) == 3)
self.assertTrue(books[0].get_title() == 'All Clear')
self.assertTrue(books[1].get_title() == 'Jane Eyre')
self.assertTrue(books[2].get_title() == 'The Tenant of Wildfell Hall')
def test_year_sorting(self):
books = self.data_source_short.books('All', 'year')
self.assertTrue(len(books) == 2)
self.assertTrue(books[0].get_title() == 'The Tenant of Wildfell Hall')
self.assertTrue(books[1].get_title() == 'All Clear')
def test_title_sorting_explicit(self):
books = self.data_source_short.books('All', 'title')
self.assertTrue(len(books) == 2)
self.assertTrue(books[0].get_title() == 'All Clear')
self.assertTrue(books[1].get_title() == 'The Tenant of Wildfell Hall')
def test_title_sorting_default(self):
books = self.data_source_short.books('All')
self.assertTrue(len(books) == 2)
self.assertTrue(books[0].get_title() == 'All Clear')
self.assertTrue(books[1].get_title() == 'The Tenant of Wildfell Hall')
def test_books_between_none(self):
books = self.data_source_short.books_between_years()
self.assertTrue(len(books) == 3)
self.assertTrue(books[0].get_title() == 'Jane Eyre')
self.assertTrue(books[1].get_title() == 'The Tenant of Wildfell Hall')
self.assertTrue(books[2].get_title() == 'All Clear')
def test_books_between_tiebreaker(self):
books = self.data_source_long.books_between_years(1995,1996)
self.assertTrue(len(books) == 2)
self.assertTrue(books[0].get_title() == 'Neverwhere')
self.assertTrue(books[1].get_title() == 'Thief of Time')
def test_books_between_no_end(self):
books = self.data_source_long.books_between_years(2020, None)
self.assertTrue(len(books) == 2)
self.assertTrue(books[0].get_title() == 'Boys and Sex')
self.assertTrue(books[1].get_title() == 'The Invisible Life of Addie LaRue')
def test_books_between_no_start(self):
books = self.data_source_long.books_between_years(None,1770)
self.assertTrue(len(books) == 1)
self.assertTrue(books[0].get_title() == 'The Life and Opinions of Tristram Shandy, Gentleman')
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
from django.contrib import admin
from main.models import Post
# Register your models here.
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
pass
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import Location,categories,Image
admin.site.register(Location)
admin.site.register(categories)
admin.site.register(Image)
|
nilq/baby-python
|
python
|
# Challenge 1
# vowels -> g
# ------------
# dog -> dgg
# cat -> cgt
def translate(phrase):
translation = ""
for letter in phrase:
if letter.lower() in "aeiou":
if letter.isupper():
translation = translation + "G"
else:
translation = translation + "g"
else:
translation = translation + letter
return translation
print(translate(input("Enter a phrase: ")))
|
nilq/baby-python
|
python
|
import asyncio
import datetime
import logging
import pytz
import threading
import traceback
from abc import ABC, abstractmethod
from confluent_kafka import DeserializingConsumer, Consumer
from confluent_kafka.schema_registry.json_schema import JSONDeserializer
from confluent_kafka.serialization import StringDeserializer
import app.db_utils.advanced_scheduler as scheduling
import app.db_utils.mongo_utils as database
import app.settings as config
from app.db_utils.advanced_scheduler import async_repeat_deco
from app.kafka import producers
from app.models import SearchDataPartialInDb, BetDataListUpdateInDb, PyObjectId, UserAuthTransfer, SearchDataInDb
class GenericConsumer(ABC):
bootstrap_servers = config.broker_settings.broker
@property
@abstractmethod
def group_id(self):
...
@property
@abstractmethod
def auto_offset_reset(self):
...
@property
@abstractmethod
def auto_commit(self):
...
@property
@abstractmethod
def topic(self):
...
@property
@abstractmethod
def schema(self):
...
@abstractmethod
def dict_to_model(self, map, ctx):
...
def close(self):
self._cancelled = True
self._polling_thread.join()
def consume_data(self):
if not self._polling_thread.is_alive():
self._polling_thread.start()
@abstractmethod
def _consume_data(self):
...
def reset_state(self):
self._cancelled = False
def __init__(self, loop=None, normal=False):
if not normal:
json_deserializer = JSONDeserializer(self.schema,
from_dict=self.dict_to_model)
string_deserializer = StringDeserializer('utf_8')
consumer_conf = {'bootstrap.servers': self.bootstrap_servers,
'key.deserializer': string_deserializer,
'value.deserializer': json_deserializer,
'group.id': self.group_id,
'auto.offset.reset': self.auto_offset_reset,
'enable.auto.commit': self.auto_commit,
'allow.auto.create.topics': True}
self._consumer = DeserializingConsumer(consumer_conf)
else:
consumer_conf = {'bootstrap.servers': self.bootstrap_servers,
'group.id': self.group_id,
'auto.offset.reset': self.auto_offset_reset,
'enable.auto.commit': self.auto_commit,
'allow.auto.create.topics': True}
self._consumer = Consumer(consumer_conf)
self._loop = loop or asyncio.get_event_loop()
self._cancelled = False
self._consumer.subscribe([self.topic])
self._polling_thread = threading.Thread(target=self._consume_data)
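# Module-level futures used to coordinate the consumer threads across topics:
# each future is created lazily under its lock and resolved once the matching
# message for a given transaction id has been processed.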
search_betdata_sync_lock = threading.Lock()
search_betdata_sync: dict[str, asyncio.Future] = {}
bet_data_update_sync_lock = threading.Lock()
bet_data_update_sync: dict[str, asyncio.Future] = {}
class PartialSearchEntryConsumer(GenericConsumer):
@property
def group_id(self):
return 'my_group_betdata'
@property
def auto_offset_reset(self):
return 'earliest'
@property
def auto_commit(self):
return False
@property
def topic(self):
return 'search-entry'
@property
def schema(self):
return """{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Partial Search data",
"description": "Partial search data",
"type": "object",
"properties": {
"web_site": {
"description": "Website name",
"type": "string"
},
"user_id": {
"description": "User's Discord id",
"type": "string"
}
},
"required": [
"web_site",
"user_id"
]
}"""
def dict_to_model(self, map, ctx):
if map is None:
return None
return SearchDataPartialInDb(**map)
@staticmethod
@scheduling.async_repeat_deco(repeat_count=3, reschedule_count=3, always_reschedule=True)
async def _rollback_data(id, tx_id):
await database.mongo.db[SearchDataPartialInDb.collection_name].delete_many({'_id': id})
await database.mongo.db['deleted_transactions'].insert_one({'tx_id': tx_id})
await database.mongo.db[BetDataListUpdateInDb.collection_name].delete_many({'search_id': id})
try:
del search_betdata_sync[tx_id]
del bet_data_update_sync[tx_id]
except:
pass
def _consume_data(self):
while not self._cancelled:
try:
msg = self._consumer.poll(0.1)
if msg is None:
continue
search_entry: SearchDataPartialInDb = msg.value()
if search_entry is not None:
id_to_insert = search_entry.id
async def complete_partial_search():
if scheduling.transaction_scheduler.get_job(msg.key()) is None:
scheduling.transaction_scheduler.add_job(self._rollback_data, 'date',
run_date=datetime.datetime.now(
pytz.utc) + datetime.timedelta(
seconds=20),
args=[id_to_insert, msg.key()],
id=msg.key(),
misfire_grace_time=None,
replace_existing=True
)
scheduling.transaction_scheduler.pause_job(msg.key())
existing_search = await database.mongo.db[SearchDataPartialInDb.collection_name].find_one(
{'tx_id': msg.key()})
if existing_search is None:
await database.mongo.db[SearchDataPartialInDb.collection_name].insert_one(
{**search_entry.dict(by_alias=True), 'tx_id': msg.key()})
scheduling.transaction_scheduler.reschedule_job(msg.key(), trigger='date',
run_date=datetime.datetime.now(
pytz.utc) + datetime.timedelta(seconds=20))
asyncio.run_coroutine_threadsafe(complete_partial_search(), self._loop).result(20)
if search_betdata_sync.get(msg.key()) is None:
with search_betdata_sync_lock:
if search_betdata_sync.get(msg.key()) is None:
search_betdata_sync[msg.key()] = self._loop.create_future()
self._loop.call_soon_threadsafe(search_betdata_sync[msg.key()].set_result, 'executed')
self._consumer.commit(msg)
else:
logging.warning(f'Null value for the message: {msg.key()}')
self._consumer.commit(msg)
except Exception as exc:
traceback.print_exc()
logging.error(exc)
try:
scheduling.transaction_scheduler.reschedule_job(job_id=msg.key(), trigger='date')
self._consumer.commit(msg)
except:
try:
self._consumer.commit(msg)
except:
pass
# break
self._consumer.close()
class BetDataApplyConsumer(GenericConsumer):
@property
def group_id(self):
return 'my_group_betdata'
@property
def auto_offset_reset(self):
return 'earliest'
@property
def auto_commit(self):
return False
@property
def topic(self):
return 'bet-data-apply'
@property
def schema(self):
return """{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "CSV Generation Request",
"description": "CSV Generation Kafka Request",
"type": "object",
"properties": {
"data": {
"description": "Bet Data",
"type": "array",
"items": {
"type": "object",
"properties": {
"date": {
"type": "string"
},
"match": {
"type": "string"
},
"one": {
"type": "string"
},
"ics": {
"type": "string"
},
"two": {
"type": "string"
},
"gol": {
"type": "string"
},
"over": {
"type": "string"
},
"under": {
"type": "string"
}
}
}
}
}
}"""
def dict_to_model(self, map, ctx):
if map is None:
return None
return BetDataListUpdateInDb(**map)
async def _update_betdata_list(self, bet_data, tx_id):
try:
search_doc = await database.mongo.db[SearchDataPartialInDb.collection_name].find_one({'tx_id': tx_id})
if search_doc is None:
deleted_tx = await database.mongo.db['deleted_transactions'].find_one({'tx_id': tx_id})
if deleted_tx is not None:
                    raise Exception('Transaction has been deleted!')
if search_betdata_sync.get(tx_id) is None:
with search_betdata_sync_lock:
if search_betdata_sync.get(tx_id) is None:
search_betdata_sync[tx_id] = self._loop.create_future()
await search_betdata_sync[tx_id]
scheduling.transaction_scheduler.pause_job(tx_id)
search_doc = await database.mongo.db[SearchDataPartialInDb.collection_name].find_one({'tx_id': tx_id})
search_id = search_doc['_id']
if search_doc.get('state') != 'updated':
await database.mongo.db[BetDataListUpdateInDb.collection_name].delete_many(
{'search_id': PyObjectId(search_id)})
await database.mongo.db[BetDataListUpdateInDb.collection_name].insert_many({**data.dict(),
'search_id': PyObjectId(
search_id)} for data
in
bet_data)
await database.mongo.db[SearchDataPartialInDb.collection_name].update_one({'tx_id': tx_id},
{'$set': {'state': 'updated'}})
if bet_data_update_sync.get(tx_id) is None:
with bet_data_update_sync_lock:
if bet_data_update_sync.get(tx_id) is None:
bet_data_update_sync[tx_id] = self._loop.create_future()
bet_data_update_sync[tx_id].set_result('success')
scheduling.transaction_scheduler.reschedule_job(tx_id, trigger='date',
run_date=datetime.datetime.now(
pytz.utc) + datetime.timedelta(seconds=30))
except:
logging.exception('')
scheduling.transaction_scheduler.reschedule_job(tx_id)
finally:
try:
search_betdata_sync[tx_id].cancel()
del search_betdata_sync[tx_id]
except:
pass
def _consume_data(self):
while not self._cancelled:
try:
msg = self._consumer.poll(0.1)
if msg is None:
continue
bet_data: BetDataListUpdateInDb = msg.value()
if bet_data is not None:
asyncio.run_coroutine_threadsafe(self._update_betdata_list(bet_data.data, msg.key()),
self._loop).result(20)
self._consumer.commit(msg)
else:
logging.warning(f'Null value for the message: {msg.key()}')
self._consumer.commit(msg)
except Exception as exc:
logging.exception('')
try:
scheduling.transaction_scheduler.reschedule_job(job_id=msg.key(), trigger='date')
self._consumer.commit(msg)
except:
try:
self._consumer.commit(msg)
search_betdata_sync[msg.key()].cancel()
del search_betdata_sync[msg.key()]
except:
pass
# break
self._consumer.close()
class BetDataFinishConsumer(GenericConsumer):
@property
def group_id(self):
return 'my_group_betdata'
@property
def auto_offset_reset(self):
return 'earliest'
@property
def auto_commit(self):
return False
@property
def topic(self):
return 'bet-data-finish'
@property
def schema(self):
return None
def dict_to_model(self, map, ctx):
return None
def _consume_data(self):
while not self._cancelled:
try:
msg = self._consumer.poll(0.1)
if msg is None:
continue
async def complete_transaction():
existing_search_doc = await database.mongo.db[SearchDataPartialInDb.collection_name].find_one(
{'tx_id': msg.key().decode('utf-8')})
if existing_search_doc is None or existing_search_doc.get('state') != 'updated':
deleted_tx = await database.mongo.db['deleted_transactions'].find_one(
{'tx_id': msg.key().decode('utf-8')})
if deleted_tx is not None:
                            raise Exception('Transaction has been deleted!')
if bet_data_update_sync.get(msg.key().decode('utf-8')) is None:
with bet_data_update_sync_lock:
if bet_data_update_sync.get(msg.key().decode('utf-8')) is None:
bet_data_update_sync[msg.key().decode('utf-8')] = self._loop.create_future()
await bet_data_update_sync[msg.key().decode('utf-8')]
await database.mongo.db[SearchDataPartialInDb.collection_name].update_one(
{'tx_id': msg.key().decode('utf-8')},
{'$set': {'csv_url': msg.value().decode('utf-8')}})
scheduling.transaction_scheduler.remove_job(msg.key().decode('utf-8'))
await asyncio.wait_for(
producers.csv_message_producer.produce(msg.key(), msg.value(), msg.headers()), 20)
asyncio.run_coroutine_threadsafe(complete_transaction(), loop=self._loop).result(20)
try:
bet_data_update_sync[msg.key().decode('utf-8')].cancel()
del bet_data_update_sync[msg.key().decode('utf-8')]
except:
pass
self._consumer.commit(msg)
except Exception as exc:
try:
scheduling.transaction_scheduler.reschedule_job(job_id=msg.key(), trigger='date')
self._consumer.commit(msg)
except:
try:
self._consumer.commit(msg)
search_betdata_sync[msg.key()].cancel()
del search_betdata_sync[msg.key()]
except:
pass
# break
self._consumer.close()
user_limit_inmemory_lock = threading.Lock()
user_limit_inmemory_cache = {}
class UserLimitAuthConsumer(GenericConsumer):
@property
def group_id(self):
return 'my_group_betdata'
@property
def auto_offset_reset(self):
return 'earliest'
@property
def auto_commit(self):
return False
@property
def topic(self):
return 'user-limit-auth'
@property
def schema(self):
return """{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "User Auth Request",
"description": "User Auth request data",
"type": "object",
"properties": {
"user_id": {
"description": "User's Discord id",
"type": "string"
},
"username": {
"description": "User's nick",
"type": "string"
}
},
"required": [
"user_id",
"username"
]
}"""
def dict_to_model(self, map, ctx):
if map is None:
return None
return UserAuthTransfer.parse_obj(map)
def _consume_data(self):
while not self._cancelled:
try:
msg = self._consumer.poll(0.1)
if msg is None:
continue
user_auth: UserAuthTransfer = msg.value()
if user_auth is not None:
async def user_search_count():
count = await database.mongo.db[SearchDataInDb.collection_name].count_documents(
{'user_id': user_auth.user_id})
await database.mongo.db['user_search_count_view'].delete_many({'user_id': user_auth.user_id})
await database.mongo.db['user_search_count_view'].insert_one({'user_id': user_auth.user_id, 'count': count})
return count
existing_user_searches = asyncio.run_coroutine_threadsafe(user_search_count(),
loop=self._loop).result(20)
if user_limit_inmemory_cache.get(msg.key()) is None:
with user_limit_inmemory_lock:
if user_limit_inmemory_cache.get(msg.key()) is None:
user_limit_inmemory_cache[msg.key()] = self._loop.create_future()
self._loop.call_soon_threadsafe(user_limit_inmemory_cache[msg.key()].set_result,
existing_user_searches)
self._consumer.commit(msg)
else:
logging.warning(f'Null value for the message: {msg.key()}')
self._consumer.commit(msg)
except Exception as exc:
logging.exception('')
try:
scheduling.transaction_scheduler.reschedule_job(job_id=msg.key(), trigger='date')
self._consumer.commit(msg)
except:
try:
self._consumer.commit(msg)
search_betdata_sync[msg.key()].cancel()
del search_betdata_sync[msg.key()]
except:
pass
# break
self._consumer.close()
class UserLimitAuthRetrieveConsumer(GenericConsumer):
@property
def group_id(self):
return 'my_group_betdata'
@property
def auto_offset_reset(self):
return 'earliest'
@property
def auto_commit(self):
return False
@property
def topic(self):
return 'user-limit-auth-retrieve'
@property
def schema(self):
return """{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "User Auth Request",
"description": "User Auth request data",
"type": "object",
"properties": {
"user_id": {
"description": "User's Discord id",
"type": "string"
},
"username": {
"description": "User's nick",
"type": "string"
}
},
"required": [
"user_id",
"username"
]
}"""
def dict_to_model(self, map, ctx):
if map is None:
return None
return UserAuthTransfer.parse_obj(map)
def _consume_data(self):
while not self._cancelled:
try:
msg = self._consumer.poll(0.1)
if msg is None:
continue
user_auth_transfer: UserAuthTransfer = msg.value()
if user_auth_transfer is not None:
async def send_user_limit_resp():
search_count_model = await database.mongo.db['user_search_count_view'].find_one({'user_id': user_auth_transfer.user_id})
if search_count_model is not None:
search_count = search_count_model.get('count')
if search_count_model is None:
if user_limit_inmemory_cache.get(msg.key()) is None:
with user_limit_inmemory_lock:
if user_limit_inmemory_cache.get(msg.key()) is None:
user_limit_inmemory_cache[msg.key()] = self._loop.create_future()
search_count = await user_limit_inmemory_cache[msg.key()]
try:
del user_limit_inmemory_cache[msg.key()]
except:
pass
producers.user_limit_auth_reply_producer.produce(msg.key(), str(search_count), msg.headers())
asyncio.run_coroutine_threadsafe(send_user_limit_resp(), loop=self._loop).result(10)
self._consumer.commit(msg)
else:
logging.warning(f'Null value for the message: {msg.key()}')
self._consumer.commit(msg)
except Exception as exc:
logging.exception('')
try:
scheduling.transaction_scheduler.reschedule_job(job_id=msg.key(), trigger='date')
self._consumer.commit(msg)
except:
try:
self._consumer.commit(msg)
search_betdata_sync[msg.key()].cancel()
del search_betdata_sync[msg.key()]
except:
pass
# break
self._consumer.close()
search_entry_consumer: PartialSearchEntryConsumer
betdata_apply_consumer: BetDataApplyConsumer
betdata_finish_consumer: BetDataFinishConsumer
user_limit_auth_consumer: UserLimitAuthConsumer
user_limit_auth_retrieve_consumer: UserLimitAuthRetrieveConsumer
def initialize_consumers():
@async_repeat_deco(3, 3, always_reschedule=True, store='alternative')
async def init_partial_search_entry_consumer(_):
global search_entry_consumer
search_entry_consumer = PartialSearchEntryConsumer(loop=asyncio.get_running_loop())
search_entry_consumer.consume_data()
@async_repeat_deco(3, 3, always_reschedule=True, store='alternative')
async def init_betdata_apply_consumer(_):
global betdata_apply_consumer
betdata_apply_consumer = BetDataApplyConsumer(loop=asyncio.get_running_loop())
betdata_apply_consumer.consume_data()
@async_repeat_deco(3, 3, always_reschedule=True, store='alternative')
async def init_betdata_finish_consumer(_):
global betdata_finish_consumer
betdata_finish_consumer = BetDataFinishConsumer(loop=asyncio.get_running_loop(), normal=True)
betdata_finish_consumer.consume_data()
@async_repeat_deco(3, 3, always_reschedule=True, store='alternative')
async def init_user_limit_auth_consumer(_):
global user_limit_auth_consumer
user_limit_auth_consumer = UserLimitAuthConsumer(loop=asyncio.get_running_loop())
user_limit_auth_consumer.consume_data()
@async_repeat_deco(3, 3, always_reschedule=True, store='alternative')
async def init_user_limit_auth_retrieve_consumer(_):
global user_limit_auth_retrieve_consumer
user_limit_auth_retrieve_consumer = UserLimitAuthRetrieveConsumer(loop=asyncio.get_running_loop())
user_limit_auth_retrieve_consumer.consume_data()
asyncio.run_coroutine_threadsafe(init_partial_search_entry_consumer('partial_search_entry_consumer'),
loop=asyncio.get_running_loop())
asyncio.run_coroutine_threadsafe(init_betdata_apply_consumer('betdata_apply_consumer'),
loop=asyncio.get_running_loop())
asyncio.run_coroutine_threadsafe(init_betdata_finish_consumer('betdata_finish_consumer'),
loop=asyncio.get_running_loop())
asyncio.run_coroutine_threadsafe(init_user_limit_auth_consumer('user_limit_auth_consumer'),
loop=asyncio.get_running_loop())
asyncio.run_coroutine_threadsafe(init_user_limit_auth_retrieve_consumer('user_limit_auth_retrieve_consumer'), loop=asyncio.get_running_loop())
def close_consumers():
search_entry_consumer.close()
betdata_apply_consumer.close()
betdata_finish_consumer.close()
user_limit_auth_consumer.close()
user_limit_auth_retrieve_consumer.close()
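# A minimal lifecycle sketch (an illustration, not part of the original module;
# it assumes a running asyncio event loop and a reachable Kafka broker):
#
# async def run_service():
#     initialize_consumers()            # spawns the polling threads
#     try:
#         await asyncio.Event().wait()  # serve until cancelled
#     finally:
#         close_consumers()             # joins the polling threads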
|
nilq/baby-python
|
python
|
from pathlib import Path
from dyslexia import io
import numpy as np
test_path = Path(__file__).resolve().parents[1]
def test_load_image_type():
image_path = test_path / "data" / "images" / "Sample_0.jpeg"
image = io.load_image(str(image_path))
assert isinstance(image, np.ndarray)
def test_load_image_size():
image_path = test_path / "data" / "images" / "Sample_0.jpeg"
image = io.load_image(str(image_path))
assert image.shape == (2607, 1834, 3)
|
nilq/baby-python
|
python
|
# Create your models here.
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.core.files.storage import get_storage_class
from django.db import models
from django_extensions.db.models import TimeStampedModel
from core.models import User
from core.helpers import ChoicesEnum
from django.conf import settings
MEDIA_STORAGE_CLASS = get_storage_class(settings.DEFAULT_FILE_STORAGE)
class EntityTypes(ChoicesEnum):
POST = "POST"
COMMENT = "COMMENT"
ATTACHMENT = "ATTACHMENT"
class AbstractEntity(TimeStampedModel):
creator = models.ForeignKey(
User,
on_delete=models.DO_NOTHING,
related_name="%(app_label)s_%(class)s_related",
related_query_name="%(app_label)s_%(class)ss",
)
class Meta:
abstract = True
class Like(AbstractEntity):
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
unique_together = ["creator", "object_id", "content_type"]
class Attachment(AbstractEntity):
# likes = GenericRelation(Like, related_query_name='comment')
file = models.FileField(storage=MEDIA_STORAGE_CLASS())
content_type = models.ForeignKey(ContentType, on_delete=models.DO_NOTHING)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Comment(AbstractEntity):
text = models.TextField()
likes = GenericRelation(Like, related_query_name="comment")
attachments = GenericRelation(Attachment, related_query_name="comment")
content_type = models.ForeignKey(ContentType, on_delete=models.DO_NOTHING)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
def __str__(self):
return self.text
@property
def recent_likes(self):
return Like.objects.filter(comment=self)[:5]
class Post(AbstractEntity):
text = models.TextField()
comments = GenericRelation(Comment, related_query_name="post")
attachments = GenericRelation(Attachment, related_query_name="post")
likes = GenericRelation(Like, related_query_name="post")
@property
def top_comments(self):
return Comment.objects.filter(post=self)[:3]
@property
def recent_likes(self):
return Like.objects.filter(post=self)[:5]
def __str__(self):
return self.text
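# A minimal usage sketch for the generic relations above (assumes at least one
# User row exists; names mirror this module, nothing else is assumed):
#
# user = User.objects.first()
# post = Post.objects.create(creator=user, text="hello")
# Like.objects.create(creator=user, content_object=post)
# post.recent_likes  # queryset of up to 5 Like rows pointing at this post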
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import json
from paramiko import SSHClient, AutoAddPolicy
from robot.api import logger
CLIENT_NAME = 'ligato_papi'
class vpp_api(object):
@staticmethod
def execute_api(host, username, password, node, command, **arguments):
with PapiExecutor(host, username, password, node) as papi_exec:
papi_resp = papi_exec.add(command, **arguments).get_replies()
return papi_resp.reply
class PapiResponse(object):
"""Class for metadata specifying the Papi reply, stdout, stderr and return
code.
"""
def __init__(self, papi_reply=None, stdout="", stderr="", requests=None):
"""Construct the Papi response by setting the values needed.
:param papi_reply: API reply from last executed PAPI command(s).
:param stdout: stdout from last executed PAPI command(s).
:param stderr: stderr from last executed PAPI command(s).
:param requests: List of used PAPI requests. It is used while verifying
replies. If None, expected replies must be provided for verify_reply
and verify_replies methods.
:type papi_reply: list or None
:type stdout: str
:type stderr: str
:type requests: list
"""
# API reply from last executed PAPI command(s).
self.reply = papi_reply
# stdout from last executed PAPI command(s).
self.stdout = stdout
# stderr from last executed PAPI command(s).
self.stderr = stderr
# List of used PAPI requests.
self.requests = requests
# List of expected PAPI replies. It is used while verifying replies.
if self.requests:
self.expected_replies = \
["{rqst}_reply".format(rqst=rqst) for rqst in self.requests]
def __str__(self):
"""Return string with human readable description of the PapiResponse.
:returns: Readable description.
:rtype: str
"""
return (
"papi_reply={papi_reply},stdout={stdout},stderr={stderr},"
"requests={requests}").format(
papi_reply=self.reply, stdout=self.stdout, stderr=self.stderr,
requests=self.requests)
def __repr__(self):
"""Return string executable as Python constructor call.
:returns: Executable constructor call.
:rtype: str
"""
return "PapiResponse({str})".format(str=str(self))
class PapiExecutor(object):
"""Contains methods for executing VPP Python API commands on DUTs.
Note: Use only with "with" statement, e.g.:
with PapiExecutor(node) as papi_exec:
papi_resp = papi_exec.add('show_version').get_replies(err_msg)
This class processes three classes of VPP PAPI methods:
1. simple request / reply: method='request',
2. dump functions: method='dump',
3. vpp-stats: method='stats'.
The recommended ways of use are (examples):
1. Simple request / reply
a. One request with no arguments:
with PapiExecutor(node) as papi_exec:
data = papi_exec.add('show_version').get_replies().\
verify_reply()
b. Three requests with arguments, the second and the third ones are the same
but with different arguments.
with PapiExecutor(node) as papi_exec:
data = papi_exec.add(cmd1, **args1).add(cmd2, **args2).\
add(cmd2, **args3).get_replies(err_msg).verify_replies()
2. Dump functions
cmd = 'sw_interface_rx_placement_dump'
with PapiExecutor(node) as papi_exec:
papi_resp = papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index']).\
get_dump(err_msg)
3. vpp-stats
path = ['^/if', '/err/ip4-input', '/sys/node/ip4-input']
with PapiExecutor(node) as papi_exec:
data = papi_exec.add(api_name='vpp-stats', path=path).get_stats()
print('RX interface core 0, sw_if_index 0:\n{0}'.\
format(data[0]['/if/rx'][0][0]))
or
path_1 = ['^/if', ]
path_2 = ['^/if', '/err/ip4-input', '/sys/node/ip4-input']
with PapiExecutor(node) as papi_exec:
data = papi_exec.add('vpp-stats', path=path_1).\
add('vpp-stats', path=path_2).get_stats()
print('RX interface core 0, sw_if_index 0:\n{0}'.\
format(data[1]['/if/rx'][0][0]))
Note: In this case, when PapiExecutor method 'add' is used:
- its parameter 'csit_papi_command' is used only to keep information
that vpp-stats are requested. It is not further processed but it is
included in the PAPI history this way:
vpp-stats(path=['^/if', '/err/ip4-input', '/sys/node/ip4-input'])
Always use csit_papi_command="vpp-stats" if the VPP PAPI method
is "stats".
- the second parameter must be 'path' as it is used by PapiExecutor
method 'add'.
"""
def __init__(self, host, username, password, node):
"""Initialization.
"""
# Node to run command(s) on.
self.host = host
self.node = node
self.username = username
self.password = password
self._ssh = SSHClient()
self._ssh.set_missing_host_key_policy(AutoAddPolicy())
# The list of PAPI commands to be executed on the node.
self._api_command_list = list()
def __enter__(self):
try:
self._ssh.connect(self.host, username=self.username, password=self.password)
except IOError:
raise RuntimeError("Cannot open SSH connection to host {host} to "
"execute PAPI command(s)".
format(host=self.host))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._ssh.close()
def add(self, csit_papi_command="vpp-stats", **kwargs):
"""Add next command to internal command list; return self.
The argument name 'csit_papi_command' must be unique enough as it cannot
be repeated in kwargs.
:param csit_papi_command: VPP API command.
:param kwargs: Optional key-value arguments.
:type csit_papi_command: str
:type kwargs: dict
:returns: self, so that method chaining is possible.
:rtype: PapiExecutor
"""
self._api_command_list.append(dict(api_name=csit_papi_command,
api_args=kwargs))
return self
def get_replies(self,
process_reply=True, ignore_errors=False, timeout=120):
"""Get reply/replies from VPP Python API.
:param process_reply: Process PAPI reply if True.
:param ignore_errors: If true, the errors in the reply are ignored.
:param timeout: Timeout in seconds.
:type process_reply: bool
:type ignore_errors: bool
:type timeout: int
:returns: Papi response including: papi reply, stdout, stderr and
return code.
:rtype: PapiResponse
"""
return self._execute(
method='request', process_reply=process_reply,
ignore_errors=ignore_errors, timeout=timeout)
@staticmethod
def _process_api_data(api_d):
"""Process API data for smooth converting to JSON string.
Apply binascii.hexlify() method for string values.
:param api_d: List of APIs with their arguments.
:type api_d: list
:returns: List of APIs with arguments pre-processed for JSON.
:rtype: list
"""
def process_value(val):
"""Process value.
:param val: Value to be processed.
:type val: object
:returns: Processed value.
:rtype: dict or str or int
"""
if isinstance(val, dict):
val_dict = dict()
for val_k, val_v in val.items():
val_dict[str(val_k)] = process_value(val_v)
return val_dict
            else:
                # Python 3: hexlify needs bytes; decode back to str so the
                # result stays JSON-serializable.
                return binascii.hexlify(val.encode()).decode() if isinstance(val, str) else val
api_data_processed = list()
for api in api_d:
api_args_processed = dict()
for a_k, a_v in api["api_args"].iteritems():
api_args_processed[str(a_k)] = process_value(a_v)
api_data_processed.append(dict(api_name=api["api_name"],
api_args=api_args_processed))
return api_data_processed
@staticmethod
def _revert_api_reply(api_r):
"""Process API reply / a part of API reply.
Apply binascii.unhexlify() method for unicode values.
:param api_r: API reply.
:type api_r: dict
:returns: Processed API reply / a part of API reply.
:rtype: dict
"""
reply_dict = dict()
reply_value = dict()
for reply_key, reply_v in api_r.items():
            for a_k, a_v in reply_v.items():
reply_value[a_k] = binascii.unhexlify(a_v) \
if isinstance(a_v, str) else a_v
reply_dict[reply_key] = reply_value
return reply_dict
def _process_reply(self, api_reply):
"""Process API reply.
:param api_reply: API reply.
:type api_reply: dict or list of dict
:returns: Processed API reply.
:rtype: list or dict
"""
if isinstance(api_reply, list):
reverted_reply = [self._revert_api_reply(a_r) for a_r in api_reply]
else:
reverted_reply = self._revert_api_reply(api_reply)
return reverted_reply
def _execute_papi(self, api_data, method='request', timeout=120):
"""Execute PAPI command(s) on remote node and store the result.
:param api_data: List of APIs with their arguments.
:param method: VPP Python API method. Supported methods are: 'request',
'dump' and 'stats'.
:param timeout: Timeout in seconds.
:type api_data: list
:type method: str
:type timeout: int
:returns: Stdout and stderr.
:rtype: 2-tuple of str
:raises SSHTimeout: If PAPI command(s) execution has timed out.
:raises RuntimeError: If PAPI executor failed due to another reason.
:raises AssertionError: If PAPI command(s) execution has failed.
"""
        if not api_data:
            raise RuntimeError("No API data provided.")
json_data = json.dumps(api_data) \
if method in ("stats", "stats_request") \
else json.dumps(self._process_api_data(api_data))
cmd = "docker exec {node} python3 {fw_dir}/{papi_provider} --data '{json}'". \
format(node=self.node,
fw_dir="/opt",
papi_provider="vpp_api_executor.py",
json=json_data)
logger.debug(cmd)
stdin, stdout, stderr = self._ssh.exec_command(
cmd, timeout=timeout)
stdout = stdout.read()
stderr = stderr.read()
return stdout, stderr
def _execute(self, method='request', process_reply=True,
ignore_errors=False, timeout=120):
"""Turn internal command list into proper data and execute; return
PAPI response.
This method also clears the internal command list.
IMPORTANT!
Do not use this method in L1 keywords. Use:
- get_stats()
- get_replies()
- get_dump()
:param method: VPP Python API method. Supported methods are: 'request',
'dump' and 'stats'.
:param process_reply: Process PAPI reply if True.
:param ignore_errors: If true, the errors in the reply are ignored.
:param timeout: Timeout in seconds.
:type method: str
:type process_reply: bool
:type ignore_errors: bool
:type timeout: int
:returns: Papi response including: papi reply, stdout, stderr and
return code.
:rtype: PapiResponse
:raises KeyError: If the reply is not correct.
"""
local_list = self._api_command_list
# Clear first as execution may fail.
self._api_command_list = list()
stdout, stderr = self._execute_papi(
local_list, method=method, timeout=timeout)
papi_reply = list()
if process_reply:
try:
json_data = json.loads(stdout)
except ValueError:
logger.error(
"An error occured while processing the PAPI reply:\n"
"stdout: {stdout}\n"
"stderr: {stderr}".format(stdout=stdout, stderr=stderr))
raise
for data in json_data:
try:
api_reply_processed = dict(
api_name=data["api_name"],
api_reply=self._process_reply(data["api_reply"]))
except KeyError:
if ignore_errors:
continue
else:
raise
papi_reply.append(api_reply_processed)
# Log processed papi reply to be able to check API replies changes
logger.debug("Processed PAPI reply: {reply}".format(reply=papi_reply))
return PapiResponse(
papi_reply=papi_reply, stdout=stdout, stderr=stderr,
requests=[rqst["api_name"] for rqst in local_list])
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import kaiming_uniform_, normal_  # needed by init_linear below
from torch.nn.utils import spectral_norm
def _conv_func(ndim=2, transpose=False):
"Return the proper conv `ndim` function, potentially `transposed`."
assert 1 <= ndim <=3
return getattr(nn, f'Conv{"Transpose" if transpose else ""}{ndim}d')
def init_linear(m, act_func=None, init='auto', bias_std=0.01):
if getattr(m,'bias',None) is not None and bias_std is not None:
if bias_std != 0: normal_(m.bias, 0, bias_std)
else: m.bias.data.zero_()
if init=='auto':
if act_func in (F.relu_,F.leaky_relu_): init = kaiming_uniform_
else: init = getattr(act_func.__class__, '__default_init__', None)
if init is None: init = getattr(act_func, '__default_init__', None)
if init is not None: init(m.weight)
class SpectralConvLayer(nn.Sequential):
"Create a sequence of convolutional (`ni` to `nf`), ReLU (if `use_activ`) and `norm_type` layers."
def __init__(self, ni, nf, ks=3, stride=1, padding=None, bias=None, ndim=2, init='auto', bias_std=0.01, **kwargs):
if padding is None: padding = (ks-1)//2
conv_func = _conv_func(ndim)
conv = conv_func(ni, nf, kernel_size=ks, bias=bias, stride=stride, padding=padding, **kwargs)
init_linear(conv, None, init=init, bias_std=bias_std)
conv = spectral_norm(conv)
layers = [conv]
super().__init__(*layers)
class SelfAttention(nn.Module):
"Self attention layer for `n_channels`."
def __init__(self, n_channels):
super().__init__()
self.query, self.key, self.value = [self._conv(n_channels, c) for c in (n_channels//8, n_channels//8, n_channels)]
self.gamma = nn.Parameter(torch.Tensor([0.]))
def _conv(self,n_in,n_out):
return SpectralConvLayer(n_in, n_out, ks=1, ndim=1, bias=False)
def forward(self, x):
#Notation from the paper.
size = x.size()
x = x.view(*size[:2],-1)
f,g,h = self.query(x),self.key(x),self.value(x)
beta = F.softmax(torch.bmm(f.transpose(1,2), g), dim=1)
o = self.gamma * torch.bmm(h, beta) + x
return o.view(*size).contiguous()
# implementation below taken from
# https://github.com/sdoria/SimpleSelfAttention/blob/master/v0.1/Imagenette%20Simple%20Self%20Attention.ipynb
#Unmodified from https://github.com/fastai/fastai/blob/5c51f9eabf76853a89a9bc5741804d2ed4407e49/fastai/layers.py
def conv1d(ni:int, no:int, ks:int=1, stride:int=1, padding:int=0, bias:bool=False):
"Create and initialize a `nn.Conv1d` layer with spectral normalization."
conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias)
nn.init.kaiming_normal_(conv.weight)
if bias: conv.bias.data.zero_()
return spectral_norm(conv)
# Adapted from SelfAttention layer at https://github.com/fastai/fastai/blob/5c51f9eabf76853a89a9bc5741804d2ed4407e49/fastai/layers.py
# Inspired by https://arxiv.org/pdf/1805.08318.pdf
class SimpleSelfAttention(nn.Module):
def __init__(self, n_in: int, ks=1): # , n_out:int):
super().__init__()
self.conv = conv1d(n_in, n_in, ks, padding=ks // 2, bias=False)
self.gamma = nn.Parameter(torch.tensor([0.]))
# self.sym = sym
self.n_in = n_in
def forward(self, x: torch.Tensor):
size = x.size()
x = x.view(*size[:2], -1) # (C,N)
convx = self.conv(x) # (C,C) * (C,N) = (C,N) => O(NC^2)
xxT = torch.bmm(x, x.permute(0, 2, 1).contiguous()) # (C,N) * (N,C) = (C,C) => O(NC^2)
o = torch.bmm(xxT, convx) # (C,C) * (C,N) = (C,N) => O(NC^2)
o = self.gamma * o + x
return o.view(*size).contiguous()
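# A quick shape check for the attention layers above (a sketch added for
# illustration, not part of the original module): both layers are
# shape-preserving, so input and output sizes must match.
if __name__ == '__main__':
    x = torch.randn(2, 32, 16, 16)            # (batch, channels, H, W)
    print(SelfAttention(32)(x).shape)          # torch.Size([2, 32, 16, 16])
    print(SimpleSelfAttention(32)(x).shape)    # torch.Size([2, 32, 16, 16])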
|
nilq/baby-python
|
python
|
import numpy as np
import numba as nb
@nb.jit(nopython=True, parallel=False, fastmath=True)
def kernel(M, float_n, data):
# mean = np.mean(data, axis=0)
mean = np.sum(data, axis=0) / float_n
data -= mean
cov = np.zeros((M, M), dtype=data.dtype)
# for i in range(M):
# for j in range(i, M):
# cov[i, j] = np.sum(data[:, i] * data[:, j])
# cov[i, j] /= float_n - 1.0
# cov[j, i] = cov[i, j]
for i in range(M):
cov[i:M, i] = cov[i, i:M] = data[:, i] @ data[:, i:M] / (float_n - 1.0)
return cov
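# A quick self-check against NumPy (a sketch for illustration; note that
# kernel() mutates `data` in place by subtracting the column means, hence the
# .copy() below):
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    data = rng.standard_normal((100, 5))
    expected = np.cov(data, rowvar=False)             # same (N - 1) normalization
    result = kernel(5, float(len(data)), data.copy())
    print(np.allclose(result, expected))              # True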
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class NotificationsConfig(AppConfig):
name = 'safe_transaction_service.notifications'
verbose_name = 'Notifications for Safe Transaction Service'
|
nilq/baby-python
|
python
|
import sys
import subprocess
import os
if sys.platform == 'win32':
dir_path = os.path.dirname(os.path.realpath(__file__))
if len(sys.argv) >= 2:
subprocess.Popen(['startup.bat', sys.argv[1]], cwd=dir_path)
else:
subprocess.Popen(['startup.bat'], cwd=dir_path)
elif sys.platform in ['darwin', 'linux2', 'linux']:
if len(sys.argv) >= 2:
subprocess.Popen(['sh', './startup.sh', sys.argv[1]])
else:
subprocess.Popen(['sh', './startup.sh'])
else:
print('Operating system not recognized')
|
nilq/baby-python
|
python
|
"""
parse simple structures from an xml tree
We only support a subset of features but should be enough
for custom structures
"""
import os
import importlib
from lxml import objectify
from opcua.ua.ua_binary import Primitives
def get_default_value(uatype):
if uatype == "String":
return "None"
elif uatype == "Guid":
return "uuid.uuid4()"
elif uatype in ("ByteString", "CharArray", "Char"):
return None
elif uatype == "Boolean":
return "True"
elif uatype == "DateTime":
return "datetime.utcnow()"
elif uatype in ("Int8", "Int16", "Int32", "Int64", "UInt8", "UInt16", "UInt32", "UInt64", "Double", "Float", "Byte", "SByte"):
return 0
else:
return "ua." + uatype + "()"
class Struct(object):
def __init__(self, name):
self.name = name
self.fields = []
self.code = ""
def get_code(self):
if not self.fields:
return """
class {}(object):
pass
""".format(self.name)
self._make_constructor()
self._make_from_binary()
self._make_to_binary()
return self.code
def _make_constructor(self):
self.code = """
class {0}(object):
'''
{0} structure autogenerated from xml
'''
def __init__(self, data=None):
if data is not None:
self._binary_init(data)
return
""".format(self.name)
for field in self.fields:
self.code += " self.{} = {}\n".format(field.name, field.value)
def _make_from_binary(self):
self.code += '''
@staticmethod
def from_binary(data):
return {}(data=data)
def _binary_init(self, data):
'''.format(self.name)
for field in self.fields:
if hasattr(Primitives, field.uatype):
if field.array:
self.code += ' self.{} = ua.ua_binary.Primitives.{}.unpack_array(data)\n'.format(field.name, field.uatype)
else:
self.code += ' self.{} = ua.ua_binary.Primitives.{}.unpack(data)\n'.format(field.name, field.uatype)
else:
if field.array:
self.code += '''
length = ua.ua_binary.Primitives.Int32.unpack(data)
if length == -1:
self.{0} = None
else:
self.{0} = [ua.{1}.from_binary(data) for _ in range(length)]
'''.format(field.name, field.uatype)
else:
self.code += " self.{} = ua.{}.from_binary(data)\n".format(field.name, field.uatype)
def _make_to_binary(self):
self.code += '''
def to_binary(self):
packet = []
'''
for field in self.fields:
if hasattr(Primitives, field.uatype):
if field.array:
self.code += ' packet.append(ua.ua_binary.Primitives.{}.pack_array(self.{}))\n'.format(field.uatype, field.name)
else:
self.code += ' packet.append(ua.ua_binary.Primitives.{}.pack(self.{}))\n'.format(field.uatype, field.name)
else:
if field.array:
self.code += '''
if self.{0} is None:
packet.append(ua.ua_binary.Primitives.Int32.pack(-1))
else:
packet.append(ua.ua_binary.Primitives.Int32.pack(len(self.{0})))
for element in self.{0}:
packet.append(element.to_binary())
'''.format(field.name)
else:
self.code += " packet.append(self.{}.to_binary())\n".format(field.name)
self.code += ' return b"".join(packet)'
class Field(object):
def __init__(self, name):
self.name = name
self.uatype = None
self.value = None
self.array = False
class StructGenerator(object):
def __init__(self):
self.model = []
def make_model_from_string(self, xml):
obj = objectify.fromstring(xml)
self._make_model(obj)
def make_model_from_file(self, path):
obj = objectify.parse(path)
root = obj.getroot()
self._make_model(root)
def _make_model(self, root):
for child in root.iter("{*}StructuredType"):
struct = Struct(child.get("Name"))
array = False
for xmlfield in child.iter("{*}Field"):
name = xmlfield.get("Name")
if name.startswith("NoOf"):
array = True
continue
field = Field(name)
field.uatype = xmlfield.get("TypeName")
if ":" in field.uatype:
field.uatype = field.uatype.split(":")[1]
field.value = get_default_value(field.uatype)
if array:
field.array = True
field.value = []
array = False
struct.fields.append(field)
self.model.append(struct)
def save_to_file(self, path):
_file = open(path, "wt")
self._make_header(_file)
for struct in self.model:
_file.write(struct.get_code())
_file.close()
def save_and_import(self, path, append_to=None):
"""
save the new structures to a python file which be used later
import the result and return resulting classes in a dict
if append_to is a dict, the classes are added to the dict
"""
self.save_to_file(path)
name = os.path.basename(path)
name = os.path.splitext(name)[0]
mymodule = importlib.import_module(name)
if append_to is None:
result = {}
else:
result = append_to
for struct in self.model:
result[struct.name] = getattr(mymodule, struct.name)
return result
def get_structures(self):
ld = {}
for struct in self.model:
exec(struct.get_code(), ld)
return ld
def _make_header(self, _file):
_file.write("""
'''
THIS FILE IS AUTOGENERATED, DO NOT EDIT!!!
'''
from datetime import datetime
import uuid
from opcua import ua
""")
if __name__ == "__main__":
import sys
from IPython import embed
sys.path.insert(0, ".") # necessary for import in current dir
#xmlpath = "schemas/Opc.Ua.Types.bsd"
xmlpath = "schemas/example.bsd"
c = StructGenerator(xmlpath, "structures.py")
c.run()
import structures as s
#sts = c.get_structures()
embed()
|
nilq/baby-python
|
python
|
import os
import json
import xmltodict
xml_list = os.listdir("./xml/")
eng_reading = json.loads(open("./noword.json", "r").read())
eng_data = eng_reading["data"]
n = 0
for data in eng_data:
text = data["text"]
for t in text:
word_list = []
try:
xml = "./xml/" + xml_list[n]
            with open(xml, 'r', encoding='UTF-8') as f:
                doc = xmltodict.parse(f.read())
                for word in doc['Frhelper_Backup']['StudyLists']['CustomizeListItem']:
                    word_list.append(word["@word"])
print(n)
except:
pass
t["words"] = word_list
n+=1
new_reading = json.dumps(eng_reading)
with open("./english_reading_10_19_word_191003.json", "w") as e:
e.write(new_reading)
print("数据解析存储完毕!!!")
|
nilq/baby-python
|
python
|
message = 'vyv gri kbo iye cdsvv nomynsxq drsc mszrob iye kbo tecd gkcdsxq iyeb dswo vyv hnnnn' # encrypted message
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
message = message.upper()
def decrypt(message, LETTERS):
for key in range(len(LETTERS)):
translated = ''
for symbol in message:
if symbol in LETTERS:
num = LETTERS.find(symbol)
num = num - key
if num < 0:
num = num + len(LETTERS)
translated = translated + LETTERS[num]
else:
translated = translated + symbol
print('Hacking key #{}: {}'.format(key, translated))
decrypt(message, LETTERS)
|
nilq/baby-python
|
python
|
from django.shortcuts import render, get_object_or_404
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from .models import Post, Category
from .forms import CatTransferForm
def category(request, slug=None):
if slug:
instance = get_object_or_404(Category, slug=slug)
all_posts = Post.published_objects.filter(category=instance)
else:
instance = None
all_posts = Post.published_objects.all()
ctx = {'category': instance, 'posts': all_posts}
return render(request, 'category.html', ctx)
def post(request, year, month, slug):
article = get_object_or_404(Post, publish__year=year,
publish__month=month, slug=slug)
ctx = {'article': article}
return render(request, 'article.html', ctx)
def user_posts(request, userid):
user = get_object_or_404(User, id=userid)
all_posts = Post.objects.filter(author=user, publish__isnull=False)
ctx = {'author': user, 'posts': all_posts}
return render(request, 'category.html', ctx)
@staff_member_required
def transfer_posts_tool(request):
if request.method == 'POST':
form = CatTransferForm(request.POST)
if form.is_valid():
Post.objects.filter(category__in=form.cleaned_data['from_cats']).update(
category=form.cleaned_data['to_cat'])
else:
form = CatTransferForm()
ctx = {
'form': form,
}
return render(request, 'transfer_tool.html', ctx)
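# A possible URLconf wiring for the views above (a sketch; the route patterns
# and names are assumptions, not part of the original app):
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.category, name='home'),
#     path('category/<slug:slug>/', views.category, name='category'),
#     path('<int:year>/<int:month>/<slug:slug>/', views.post, name='post'),
#     path('author/<int:userid>/', views.user_posts, name='user_posts'),
#     path('tools/transfer-posts/', views.transfer_posts_tool, name='transfer_posts'),
# ]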
|
nilq/baby-python
|
python
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource projections supplementary help."""
import textwrap
from googlecloudsdk.calliope import base
from googlecloudsdk.core.resource import resource_topics
class Projections(base.TopicCommand):
"""Resource projections supplementary help."""
detailed_help = {
# pylint: disable=protected-access, need transform dicts.
'DESCRIPTION': textwrap.dedent("""\
{description}
### Projections
A projection is a list of keys that selects resource data values.
Projections are used in *--format* flag expressions. For example, the
*table* format requires a projection that describes the table columns:
table(name, network.ip.internal, network.ip.external, uri())
### Transforms
A *transform* formats resource data values. Each projection key may
have zero or more transform calls:
_key_._transform_([arg...])...
This example applies the *foo*() and then the *bar*() transform to the
*status.time* resource value:
(name, status.time.foo().bar())
{transform_registry}
### Key Attributes
Key attributes control formatted output. Each projection key may have
zero or more attributes:
_key_:_attribute_=_value_...
where =_value_ is omitted for Boolean attributes and no-_attribute_
sets the attribute to false. Attribute values may appear in any order,
but must be specified after any transform calls. The attributes are:
*alias*=_ALIAS-NAME_::
Sets _ALIAS-NAME_ as an alias for the projection key.
*align*=_ALIGNMENT_::
Specifies the output column data alignment. Used by the *table*
format. The alignment values are:
*left*:::
Left (default).
*center*:::
Center.
*right*:::
Right.
*label*=_LABEL_::
A string value used to label output. Use :label="" or :label=''
for no label. The *table* format uses _LABEL_ values as column
headings. Also sets _LABEL_ as an alias for the projection key.
        The default label is the disambiguated right-hand parts of the
column key name in ANGRY_SNAKE_CASE.
[no-]*reverse*::
Sets the key sort order to descending. *no-reverse* resets to the
default ascending order.
*sort*=_SORT-ORDER_::
An integer counting from 1. Keys with lower sort-order are sorted
first. Keys with same sort order are sorted left to right.
*wrap*::
Enables the column text to be wrapped if the table would otherwise
be too wide for the display.
""").format(
description=resource_topics.ResourceDescription('projection'),
transform_registry=
resource_topics.TransformRegistryDescriptions()),
'EXAMPLES': """\
List a table of instance *zone* (sorted in descending order) and
*name* (sorted by *name* and centered with column heading *INSTANCE*)
and *creationTimestamp* (listed using the *strftime*(3) year-month-day
format with column heading *START*):
$ gcloud compute instances list --format='table(name:sort=2:align=center:label=INSTANCE, zone:sort=1:reverse, creationTimestamp.date("%Y-%m-%d"):label=START)'
List only the *name*, *status* and *zone* instance resource keys in
YAML format:
$ gcloud compute instances list --format='yaml(name, status, zone)'
List only the *config.account* key value(s) in the *info* resource:
$ gcloud info --format='value(config.account)'
""",
}
|
nilq/baby-python
|
python
|
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger(object):
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
from collections import deque
class NestedIterator(object):
def __init__(self, nestedList):
"""
Initialize your data structure here.
:type nestedList: List[NestedInteger]
"""
self.stack = deque(nestedList[::-1])
self.value = None
def next(self):
"""
:rtype: int
"""
self.hasNext()
ret = self.value
self.value = None
return ret
def hasNext(self):
"""
:rtype: bool
"""
if self.value is not None:
return True
stack = self.stack
while stack:
top = stack.pop()
if top.isInteger():
self.value = top.getInteger()
return True
else:
stack.extend(top.getList()[::-1])
return False
# Your NestedIterator object will be instantiated and called as such:
# i, v = NestedIterator(nestedList), []
# while i.hasNext(): v.append(i.next())
|
nilq/baby-python
|
python
|
"""Services Page Locator Class"""
# Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
from tests.web_app_tests.parabank_test.page_locators.base_page_locator import BasePageLocator
class ServicesPageLocator(BasePageLocator):
"""
Services Page Locator Class
Holds all relevant locators for 'Services' page web elements.
Each locator is a tuple.
Separate the locator strings from the place where they are being used.
"""
pass
|
nilq/baby-python
|
python
|
from .state import State
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import sys
import os
import shutil
import warnings
from django.core.management import execute_from_command_line
os.environ['DJANGO_SETTINGS_MODULE'] = 'wagtail.tests.settings'
def runtests():
# Don't ignore DeprecationWarnings
warnings.simplefilter('default', DeprecationWarning)
warnings.simplefilter('default', PendingDeprecationWarning)
args = sys.argv[1:]
if '--postgres' in args:
os.environ['DATABASE_ENGINE'] = 'django.db.backends.postgresql_psycopg2'
args.remove('--postgres')
if '--elasticsearch' in args:
os.environ.setdefault('ELASTICSEARCH_URL', 'http://localhost:9200')
args.remove('--elasticsearch')
argv = sys.argv[:1] + ['test'] + args
try:
execute_from_command_line(argv)
finally:
from wagtail.tests.settings import STATIC_ROOT, MEDIA_ROOT
shutil.rmtree(STATIC_ROOT, ignore_errors=True)
shutil.rmtree(MEDIA_ROOT, ignore_errors=True)
if __name__ == '__main__':
runtests()
|
nilq/baby-python
|
python
|
favorite_word = "coding" # valid string using double quotes
favorite_word = 'coding' # also valid string using single quotes
print(favorite_word)
|
nilq/baby-python
|
python
|
#encoding=utf-8
import torch
import os
import json
import argparse
import logging
import random
import numpy as np
from typing import NamedTuple
from dataset import MyBartTokenizer, Dataset
from models import Config as ModelConfig
from models import MyPLVCG, MyClassificationPLVCG
from train import test_rank
parser = argparse.ArgumentParser(description='test_rank.py')
parser.add_argument('-input_path', type=str, default='LiveBot', help="input folder path")
parser.add_argument('-workspace_path', type=str, default='LiveBot/MyPLVCG', help="output and config folders path")
parser.add_argument('-model_cfg_file', type=str, default=os.path.join('config', 'model.json'), help="model config file")
parser.add_argument('-rank_cfg_file', type=str, default=os.path.join('config', 'rank.json'), help="pretrain config file")
parser.add_argument('-img_file', type=str, default='res18.pkl', help="image file")
parser.add_argument('-test_corpus_file', type=str, default='test-candidate.json', help="test corpus json file")
parser.add_argument('-vocab_file', type=str, default='dicts-30000_tokenizer.json', help="vocabulary json file")
parser.add_argument('-merges_file', type=str, default='merges.txt', help="merge tokens")
parser.add_argument('-video_type_map_file', type=str, default='video_type.json', help="video type json file")
parser.add_argument('-preprocess_dir', type=str, default='preprocessed_data', help="path of preprocessed files")
parser.add_argument('-save_dir', type=str, default='ckpt_cf', help="checkpoint folder")
parser.add_argument('-model_file', type=str, default='best-model.pt', help="Restoring model file")
parser.add_argument('-rank_dir', type=str, default='rank', help="rank folder")
parser.add_argument('-log_dir', type=str, default='log', help="log folder")
parser.add_argument('-load', default=False, action='store_true', help="load scores")
parser.add_argument('-model_from', type=str, default='classification', help="the type of model to load")
class RankConfig(NamedTuple):
""" Hyperparameters for training """
seed: int = 3431 # random seed
predict_batch_size: int = 1
total_steps: int = 0 # total number of steps to train
weight_decay: float = 0.0
max_output_length: int = 20
print_steps: int = 100
    classification_thread: float = 0.0
@classmethod
def load_from_json(cls, file): # load config from json file
return cls(**json.load(open(file, "r")))
def ranking():
opt = parser.parse_args()
rank_config = RankConfig.load_from_json(os.path.join(opt.workspace_path, opt.rank_cfg_file))
model_cfg = ModelConfig.load_from_json(os.path.join(opt.workspace_path, opt.model_cfg_file))
img_file = os.path.join(opt.input_path, opt.img_file)
test_corpus_file = os.path.join(opt.input_path, opt.test_corpus_file)
vocab_file = os.path.join(opt.input_path, opt.vocab_file)
merges_file = os.path.join(opt.input_path, opt.merges_file)
video_type_map_file = os.path.join(opt.input_path, opt.video_type_map_file)
preprocess_dir = os.path.join(opt.workspace_path, opt.preprocess_dir)
rank_dir = os.path.join(opt.workspace_path, opt.rank_dir)
log_dir = os.path.join(opt.workspace_path, opt.log_dir)
save_dir = os.path.join(opt.workspace_path, opt.save_dir)
model_file = os.path.join(save_dir, opt.model_file)
log_filename = "{}log.txt".format("rank_")
log_filename = os.path.join(log_dir,log_filename)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
                        handlers=[logging.FileHandler(log_filename),
logging.StreamHandler()])
logger = logging.getLogger(__name__)
logger.info(opt)
tokenizer = MyBartTokenizer(vocab_file, merges_file)
test_data = Dataset(vocab_file, test_corpus_file, img_file, video_type_map_file, preprocess_dir, model_cfg, rank_config, imgs=None, is_training=False, type = 'test')
test_data.load_test_dataset(tokenizer)
test_data.load_dataloader()
    model_cls = MyClassificationPLVCG if opt.model_from == 'classification' else MyPLVCG
    model = model_cls(model_cfg, type='test')
    logger.info("Loading checkpoint from {}".format(model_file))
    model.load_state_dict(torch.load(model_file))
if torch.cuda.is_available():
model.to(torch.device("cuda"))
model.eval()
if opt.load:
with open(os.path.join(rank_dir,'rank_score_%s.json'%(opt.model_from)), "r") as f:
scores, pred_list = json.load(f)
ranks = [sorted(range(len(score)), key=lambda k: score[k],reverse=True) for score in scores]
        # For a random-ranking baseline, shuffle instead:
        # random.shuffle(ranks)
else:
        with torch.no_grad():
            ranks, scores, pred_list = test_rank(rank_config, model, test_data, type='classification')
        scores = [np.array(s.cpu()).tolist() for s in scores]
        with open(os.path.join(rank_dir, 'rank_score_%s.json' % (opt.model_from)), 'w', encoding='utf8') as f_scores:
            json.dump([scores, pred_list], f_scores)
predictions = []
references = []
hits_1 = 0
hits_5 = 0
hits_10 = 0
mean_rank = 0
mean_reciprocal_rank = 0
f_outs = open(os.path.join(rank_dir,'out.txt'),'w', encoding='utf8')
for i, rank in enumerate(ranks):
gt_dic = test_data.gts[i]
pred_b = pred_list[i]
candidate = []
comments = list(gt_dic.keys())
for id in rank:
candidate.append(comments[id])
f_outs.write("\n========================\n")
predictions.append(candidate)
references.append(gt_dic)
hit_rank = calc_hit_rank(candidate, gt_dic)
f_outs.write("%d\n"%(hit_rank))
cont = test_data.decode(test_data.contexts[i])
end = cont.find("<PAD>")
if end != -1:
cont = cont[:end]
f_outs.write("%s\n"%(cont))
for j,id in enumerate(rank):
if opt.model_from == 'classification':
p = pred_b
f_outs.write("%d %d %d %f %d %s || %d\n"%(i,j,rank[j],scores[i][rank[j]],gt_dic[comments[id]],comments[id],p))
else:
p = pred_b[rank[j]]
f_outs.write("%d %d %d %f %d %s || %s\n"%(i,j,rank[j],scores[i][rank[j]],gt_dic[comments[id]],comments[id],p))
mean_rank += hit_rank
mean_reciprocal_rank += 1.0/hit_rank
hits_1 += int(hit_rank <= 1)
hits_5 += int(hit_rank <= 5)
hits_10 += int(hit_rank <= 10)
#for j,g in enumerate(gt_dic.keys()):
# print(scores[i][j], g, gt_dic[g])
f_outs.close()
total = len(test_data.gts)
    result_line = "\t r@1:%f \t r@5:%f \t r@10:%f \t mr:%f \t mrr:%f" % (
        hits_1 / total * 100, hits_5 / total * 100, hits_10 / total * 100,
        mean_rank / total, mean_reciprocal_rank / total)
    print(result_line)
    with open(os.path.join(rank_dir, 'rank_res.txt'), 'w', encoding='utf8') as f_o:
        f_o.write(result_line)
def calc_hit_rank(prediction, reference):
for i, p in enumerate(prediction):
if reference[p] == 1:
#print(i,p,reference[p])
return i+1
print(prediction)
print(reference)
raise ValueError('No reference!')
def recall(predictions, references, k=1):
assert len(predictions) == len(references)
total = len(references)
hits = 0
for p, c in zip(predictions, references):
hits += int(calc_hit_rank(p, c) <= k)
return hits * 100.0 / total
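# A tiny sanity check (hypothetical data) for the two metrics above:
# calc_hit_rank returns the 1-based rank of the first ground-truth comment,
# and recall reports the percentage of queries whose ground truth appears in
# the top k.
#   preds = [['good', 'bad'], ['bad', 'good']]
#   refs = [{'good': 1, 'bad': 0}, {'bad': 0, 'good': 1}]
#   calc_hit_rank(preds[0], refs[0])  # -> 1
#   recall(preds, refs, k=1)          # -> 50.0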
if __name__ == '__main__':
ranking()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import os
import yaml
import re
COLOR_METHOD = '#7fb800'
COLOR_PARAM = '#00a6ed'
COLOR_INSTANCE_VAR = '#f8805a'
COLOR_NOTE = '#8E8E8E'
COLOR_WARNING = '#ED2F2F'
QUERY = re.compile(r'([cC]arla(\.[a-zA-Z0-9_]+)+)')
def create_hyperlinks(text):
return re.sub(QUERY, r'[\1](#\1)', text)
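# For example (hypothetical docstring text):
#   create_hyperlinks('Returns a carla.WorldSnapshot.')
#   # -> 'Returns a [carla.WorldSnapshot](#carla.WorldSnapshot).'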
def join(elem, separator=''):
return separator.join(elem)
class MarkdownFile:
def __init__(self):
self._data = ""
self._list_depth = 0
self.endl = ' \n'
def data(self):
return self._data
def list_push(self, buf=''):
if buf:
self.text(join([
' ' * self._list_depth if self._list_depth != 0 else '', '- ', buf]))
self._list_depth = (self._list_depth + 1)
def list_pushn(self, buf):
self.list_push(join([buf, self.endl]))
def list_pop(self):
self._list_depth = max(self._list_depth - 1, 0)
def list_popn(self):
self.list_pop()
self._data = join([self._data, '\n'])
def list_depth(self):
if self._data.strip()[-1:] != '\n' or self._list_depth == 0:
return ''
return join([' ' * self._list_depth])
def separator(self):
self._data = join([self._data, '\n---\n'])
def new_line(self):
self._data = join([self._data, self.endl])
def text(self, buf):
self._data = join([self._data, buf])
def textn(self, buf):
self._data = join([self._data, self.list_depth(), buf, self.endl])
def title(self, strongness, buf):
self._data = join([
self._data, '\n', self.list_depth(), '#' * strongness, ' ', buf, '\n'])
def title_html(self, strongness, buf):
self._data = join([
self._data, '\n', self.list_depth(), '<h', str(strongness), '>', buf, '</h', str(strongness), '>\n'])
def inherit_join(self, inh):
self._data = join([
self._data,'<div style="padding-left:30px;margin-top:-20px"><small><b>Inherited from ',inh,'</b></small></div></p><p>'])
def note(self, buf):
self._data = join([self._data, buf])
def code_block(self, buf, language=''):
return join(['```', language, '\n', self.list_depth(), buf, '\n', self.list_depth(), '```\n'])
def prettify_doc(self, doc):
punctuation_marks = ['.', '!', '?']
doc = doc.strip()
doc += '' if doc[-1:] in punctuation_marks else '.'
return doc
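# Minimal usage sketch of the MarkdownFile builder above (hypothetical content):
#   md = MarkdownFile()
#   md.title(3, 'Overview')
#   md.list_pushn('carla.Client')
#   md.list_pop()
#   md.data()  # -> a '### Overview' heading followed by one '- carla.Client' list item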
def italic(buf):
return join(['_', buf, '_'])
def bold(buf):
return join(['**', buf, '**'])
def code(buf):
return join(['`', buf, '`'])
def brackets(buf):
return join(['[', buf, ']'])
def parentheses(buf):
return join(['(', buf, ')'])
def small(buf):
return join(['<sub><sup>', buf, '</sup></sub>'])
def sub(buf):
return join(['<sub>', buf, '</sub>'])
def html_key(buf):
return join(['<a name="', buf, '"></a>'])
def color(col, buf):
return join(['<font color="', col, '">', buf, '</font>'])
def valid_dic_val(dic, value):
return value in dic and dic[value]
class YamlFile:
"""Yaml file class"""
def __init__(self, path):
self._path = path
with open(path) as yaml_file:
self.data = yaml.safe_load(yaml_file)
self.validate()
def validate(self):
# print('Validating ' + str(self._path.replace('\\', '/').split('/')[-1:][0]))
if self.data is None:
print('\n[ERROR] File: ' + self._path)
print("This file has no data:")
exit(0)
for module in self.data:
if 'module_name' in module and module['module_name'] is None:
print('\n[ERROR] File: ' + self._path)
print("'module_name' is empty in:")
exit(0)
if 'classes' in module:
if not module['classes']:
print('\n[ERROR] File: ' + self._path)
print("'classes' is empty in:")
exit(0)
for cl in module['classes']:
if 'class_name' in cl and cl['class_name'] is None:
print('\n[ERROR] File: ' + self._path)
print("'class_name' is empty in:")
exit(0)
if 'instance_variables' in cl and cl['instance_variables']:
for iv in cl['instance_variables']:
if 'var_name' not in iv:
print('\n[ERROR] File: ' + self._path)
print("'var_name' not found inside 'instance_variables' of class: " + cl['class_name'])
exit(0)
if 'var_name' in iv and iv['var_name'] is None:
print('\n[ERROR] File: ' + self._path)
print("'var_name' is empty in:")
exit(0)
if 'methods' in cl and cl['methods']:
for met in cl['methods']:
if 'def_name' not in met:
print('\n[ERROR] File: ' + self._path)
print("'def_name' not found inside 'methods' of class: " + cl['class_name'])
exit(0)
if 'def_name' in met and met['def_name'] is None:
print('\n[ERROR] File: ' + self._path)
print("'def_name' is empty in:")
exit(0)
if 'params' in met and met['params']:
for param in met['params']:
if 'param_name' not in param:
print('\n[ERROR] File: ' + self._path)
print("'param_name' not found inside 'params' of class: " + cl['class_name'])
exit(0)
if 'param_name' in param and param['param_name'] is None:
print('\n[ERROR] File: ' + self._path)
print("'param_name' is empty in:")
exit(0)
if 'type' in param and param['type'] is None:
print('\n[ERROR] File: ' + self._path)
print("'type' is empty in:")
exit(0)
def get_modules(self):
return [module for module in self.data]
def gen_stub_method_def(method):
"""Return python def as it should be written in stub files"""
param = ''
method_name = method['def_name']
for p in method['params']:
p_type = join([': ', str(p['type'])]) if 'type' in p else ''
default = join([' = ', str(p['default'])]) if 'default' in p else ''
param = join([param, p['param_name'], p_type, default, ', '])
param = param[:-2] # delete the last ', '
return_type = join([' -> ', method['return']]) if 'return' in method else ''
return join([method_name, parentheses(param), return_type])
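# For example, a hypothetical YAML method entry such as
#   {'def_name': 'load_world', 'params': [{'param_name': 'name', 'type': 'str'}], 'return': 'carla.World'}
# is rendered by gen_stub_method_def as: load_world(name: str) -> carla.World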
def gen_doc_method_def(method, is_indx=False, with_self=True):
"""Return python def as it should be written in docs"""
param = ''
method_name = method['def_name']
if valid_dic_val(method, 'static'):
with_self = False
    # to correctly render methods like __init__ in md
if method_name[0] == '_':
method_name = '\\' + method_name
if is_indx:
method_name = bold(method_name)
else:
method_name = bold(color(COLOR_METHOD, method_name))
if with_self:
if not 'params' in method or method['params'] is None:
method['params'] = []
method['params'].insert(0, {'param_name': 'self'})
if valid_dic_val(method, 'params'):
for p in method['params']:
default = join(['=', str(p['default'])]) if 'default' in p else ''
if is_indx:
param = join([param, bold(p['param_name']), default, ', '])
else:
param = join([param, color(COLOR_PARAM, bold(p['param_name']) + create_hyperlinks(default)), ', '])
if with_self:
method['params'] = method['params'][1:]
if not method['params']: # if is empty delete it
del method['params']
param = param[:-2] # delete the last ', '
return join([method_name, parentheses(param)])
def gen_inst_var_indx(inst_var, class_key):
inst_var_name = inst_var['var_name']
inst_var_key = join([class_key, inst_var_name], '.')
return join([
brackets(bold(inst_var_name)),
parentheses(inst_var_key), ' ',
sub(italic('Instance variable'))])
def gen_method_indx(method, class_key):
method_name = method['def_name']
method_key = join([class_key, method_name], '.')
method_def = gen_doc_method_def(method, True)
return join([
brackets(method_def),
parentheses(method_key), ' ',
sub(italic('Method'))])
def add_doc_method_param(md, param):
param_name = param['param_name']
param_type = ''
param_doc = ''
if valid_dic_val(param, 'type'):
param_type = create_hyperlinks(param['type'])
if valid_dic_val(param, 'doc'):
param_doc = create_hyperlinks(md.prettify_doc(param['doc']))
param_type = '' if not param_type else parentheses(italic(param_type))
md.list_push(code(param_name))
if param_type:
md.text(' ' + param_type)
if param_doc:
md.textn(' – ' + param_doc)
else:
md.new_line()
md.list_pop()
def add_doc_method(md, method, class_key):
method_name = method['def_name']
method_key = join([class_key, method_name], '.')
method_def = gen_doc_method_def(method, False)
md.list_pushn(join([html_key(method_key), method_def]))
# Method doc
if valid_dic_val(method, 'doc'):
md.textn(create_hyperlinks(md.prettify_doc(method['doc'])))
printed_title = False
if valid_dic_val(method, 'params'):
for param in method['params']:
# is_self = valid_dic_val(param, 'param_name') and param['param_name'] == 'self'
have_doc = valid_dic_val(param, 'doc')
have_type = valid_dic_val(param, 'type')
if not have_doc and not have_type:
continue
# Print the 'Parameters' title once
if not printed_title:
printed_title = True
md.list_push(bold('Parameters:') + '\n')
add_doc_method_param(md, param)
if printed_title:
md.list_pop()
# Return doc
if valid_dic_val(method, 'return'):
md.list_push(bold('Return:') + ' ')
md.textn(italic(create_hyperlinks(method['return'])))
md.list_pop()
# Note doc
if valid_dic_val(method, 'note'):
md.list_push(bold('Note:') + ' ')
md.textn(color(COLOR_NOTE, italic(create_hyperlinks(method['note']))))
md.list_pop()
# Warning doc
if valid_dic_val(method, 'warning'):
md.list_push(bold('Warning:') + ' ')
md.textn(color(COLOR_WARNING, italic(create_hyperlinks(method['warning']))))
md.list_pop()
# Raises error doc
if valid_dic_val(method, 'raises'):
md.list_pushn(bold('Raises:') + ' ' + method['raises'])
md.list_pop()
md.list_pop()
def add_doc_inst_var(md, inst_var, class_key):
var_name = inst_var['var_name']
var_key = join([class_key, var_name], '.')
var_type = ''
# Instance variable type
if valid_dic_val(inst_var, 'type'):
var_type = ' ' + parentheses(italic(create_hyperlinks(inst_var['type'])))
md.list_pushn(
html_key(var_key) +
bold(color(COLOR_INSTANCE_VAR, var_name)) +
var_type)
# Instance variable doc
if valid_dic_val(inst_var, 'doc'):
md.textn(create_hyperlinks(md.prettify_doc(inst_var['doc'])))
# Note doc
if valid_dic_val(inst_var, 'note'):
md.list_push(bold('Note:') + ' ')
md.textn(color(COLOR_NOTE, italic(create_hyperlinks(inst_var['note']))))
md.list_pop()
# Warning doc
if valid_dic_val(inst_var, 'warning'):
md.list_push(bold('Warning:') + ' ')
md.textn(color(COLOR_WARNING, italic(create_hyperlinks(inst_var['warning']))))
md.list_pop()
md.list_pop()
class Documentation:
"""Main documentation class"""
def __init__(self, path):
self._path = path
self._files = [f for f in os.listdir(path) if f.endswith('.yml')]
self._yamls = list()
for yaml_file in self._files:
self._yamls.append(YamlFile(os.path.join(path, yaml_file)))
# Merge same modules of different files
self.master_dict = dict()
for yaml_file in self._yamls:
for module in yaml_file.get_modules():
module_name = module['module_name']
if module_name not in self.master_dict:
self.master_dict[module_name] = module
elif valid_dic_val(module, 'classes'):
for new_module in module['classes']:
# Create the 'classes' key if does not exist already
if not valid_dic_val(self.master_dict[module_name], 'classes'):
self.master_dict[module_name]['classes'] = []
self.master_dict[module_name]['classes'].append(new_module)
def gen_overview(self):
"""Generates a referenced index for markdown file"""
md = MarkdownFile()
md.title(3, 'Overview')
for module_name in sorted(self.master_dict):
module = self.master_dict[module_name]
module_key = '#' + module_name
md.list_pushn(
brackets(bold(module_key[1:])) +
parentheses(module_key) + ' ' +
sub(italic('Module')))
# Generate class overview (if any)
if 'classes' in module and module['classes']:
                for cl in sorted(module['classes'], key=lambda i: i['class_name']):
class_name = cl['class_name']
class_key = join([module_key, class_name], '.')
md.list_pushn(join([
brackets(bold(class_name)),
parentheses(class_key), ' ',
sub(italic('Class'))]))
# Generate class instance variables overview (if any)
if 'instance_variables' in cl and cl['instance_variables']:
for inst_var in cl['instance_variables']:
md.list_push(gen_inst_var_indx(inst_var, class_key))
md.list_popn()
# Generate class methods overview (if any)
if 'methods' in cl and cl['methods']:
for method in cl['methods']:
md.list_push(gen_method_indx(method, class_key))
md.list_popn()
md.list_pop()
md.list_pop()
return md.data()
def gen_body(self):
"""Generates the documentation body"""
md = MarkdownFile()
for module_name in sorted(self.master_dict):
module = self.master_dict[module_name]
module_key = module_name
# Generate class doc (if any)
if valid_dic_val(module, 'classes'):
                for cl in sorted(module['classes'], key=lambda i: i['class_name']):
class_name = cl['class_name']
class_key = join([module_key, class_name], '.')
current_title = module_name+'.'+class_name
md.title(2, join([current_title,'<a name="'+current_title+'"></a>']))
inherits = ''
if valid_dic_val(cl, 'parent'):
inherits = italic(create_hyperlinks(cl['parent']))
md.inherit_join(inherits)
# Class main doc
if valid_dic_val(cl, 'doc'):
md.textn(create_hyperlinks(md.prettify_doc(cl['doc'])))
# Generate instance variable doc (if any)
if valid_dic_val(cl, 'instance_variables'):
md.title_html(3, 'Instance Variables')
for inst_var in cl['instance_variables']:
add_doc_inst_var(md, inst_var, class_key)
# Generate method doc (if any)
if valid_dic_val(cl, 'methods'):
md.title_html(3, 'Methods')
for method in cl['methods']:
add_doc_method(md, method, class_key)
md.separator()
return md.data().strip()
def gen_markdown(self):
"""Generates the whole markdown file"""
return join([self.gen_body()], '\n').strip()
def main():
"""Main function"""
print("Generating PythonAPI documentation...")
script_path = os.path.dirname(os.path.abspath(__file__))
docs = Documentation(script_path)
with open(os.path.join(script_path, '../../Docs/python_api.md'), 'w') as md_file:
md_file.write(docs.gen_markdown())
print("Done!")
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
# -*- coding:utf-8 -*-
import os
from handlers import Passport, Verify, VerifyCode, House
from tornado.web import RequestHandler
# , StaticFileHandler
# Static files go through StaticFileHandler; we subclass it here to add XSRF validation
from handlers.BaseHandler import MyStaticFileHandler
# Get the absolute path of the tornado project root directory
current_path = os.path.dirname(__file__)
handlers = [
(r'/test', Passport.IndexHandler),
(r'/api/piccode', Verify.ImageCodeHandler),
(r'/api/smscode', Verify.SMSCodeHandler),
(r'/api/register', Passport.RegisterHandler),
(r'/api/login', Passport.LoginHandler),
(r'/api/check_login', Passport.CheckLoginHandler),
(r'/api/logout', Passport.CheckLoginHandler),
(r'/api/profile/avatar', Passport.AvatarHandler),
(r'/api/house/area', House.AreaInfoHandler),
(r'/api/house/my', House.MyHousehandler),
(r'/api/house/info', House.HouseInfoHandler),
(r'^/(.*)$', MyStaticFileHandler, {
'path': os.path.join(current_path, 'html'),
'default_filename': 'index.html'
})
]
|
nilq/baby-python
|
python
|
import copy
import functools
import json
import logging
from collections import defaultdict
from multiprocessing import Pool
from tempfile import NamedTemporaryFile
from openff.qcsubmit.results import (
OptimizationResultCollection,
)
from openff.qcsubmit.results.filters import (
ConnectivityFilter,
ElementFilter,
LowestEnergyFilter,
RecordStatusFilter,
ResultRecordFilter,
)
from openff.toolkit.typing.engines.smirnoff import ForceField
from openff.toolkit.utils import UndefinedStereochemistryError
from qcportal import FractalClient
from qcportal.models import TorsionDriveRecord
from qcportal.models.records import RecordStatusEnum
from tqdm import tqdm
class UndefinedStereoFilter(ResultRecordFilter):
def _filter_function(self, result, record, molecule) -> bool:
has_stereochemistry = True
molecule = copy.deepcopy(molecule)
molecule._conformers = [molecule.conformers[0]]
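        # Round-trip the first conformer through an SDF file; molecules with
        # undefined stereochemistry raise UndefinedStereochemistryError on re-read.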
try:
with NamedTemporaryFile(suffix=".sdf") as file:
molecule.to_file(file.name, "SDF")
molecule.from_file(file.name)
except UndefinedStereochemistryError:
has_stereochemistry = False
return has_stereochemistry
def label_ids(record_and_molecule, force_field, parameter_types):
record, molecule = record_and_molecule
full_labels = force_field.label_molecules(molecule.to_topology())[0]
parameter_ids = set()
for parameter_type in parameter_types:
parameter_labels = full_labels[parameter_type]
for indices, parameter in parameter_labels.items():
if isinstance(record, TorsionDriveRecord) and {*indices[1:3]} != {
*record.keywords.dihedrals[0][1:3]
}:
continue
parameter_ids.add(parameter.id)
return [*parameter_ids]
def select_parameters(training_set, parameter_types, output_path):
# Print out coverage information.
force_field = ForceField("openff-1.3.0.offxml")
coverage = defaultdict(int)
with Pool(16) as pool:
for parameter_ids in tqdm(
pool.imap(
functools.partial(
label_ids, force_field=force_field, parameter_types=parameter_types
),
training_set.to_records(),
),
total=training_set.n_results,
):
for parameter_id in parameter_ids:
coverage[parameter_id] += 1
# Save out the SMIRKS which should be trained against this set.
with open(output_path, "w") as file:
selected_parameters = defaultdict(list)
for parameter_type in parameter_types:
for parameter_id, count in coverage.items():
found_parameters = force_field.get_parameter_handler(
parameter_type
).get_parameter({"id": parameter_id})
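                    # Keep only parameters exercised by at least five records
                    # and actually present in the force field.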
if count < 5 or len(found_parameters) == 0:
continue
selected_parameters[parameter_type].append(found_parameters[0].smirks)
json.dump(selected_parameters, file)
def main():
logging.basicConfig(level=logging.INFO)
optimization_set = OptimizationResultCollection.from_server(
client=FractalClient(),
datasets=[
"OpenFF Gen 2 Opt Set 1 Roche",
"OpenFF Gen 2 Opt Set 2 Coverage",
"OpenFF Gen 2 Opt Set 3 Pfizer Discrepancy",
"OpenFF Gen 2 Opt Set 4 eMolecules Discrepancy",
"OpenFF Gen 2 Opt Set 5 Bayer",
],
spec_name="default",
)
optimization_set = optimization_set.filter(
RecordStatusFilter(status=RecordStatusEnum.complete),
ConnectivityFilter(tolerance=1.2),
UndefinedStereoFilter(),
ElementFilter(
# The elements supported by SMIRNOFF
allowed_elements=["H", "C", "N", "O", "S", "P", "F", "Cl", "Br", "I"]
),
LowestEnergyFilter(),
)
with open("data-sets/1-2-0-opt-set-v2.json", "w") as file:
file.write(optimization_set.json())
select_parameters(
optimization_set,
parameter_types=["Bonds", "Angles"],
output_path="data-sets/1-2-0-opt-set-v2-valence-smirks.json",
)
hessian_set = optimization_set.to_basic_result_collection(driver="hessian")
with open("data-sets/1-2-0-hess-set-v2.json", "w") as file:
file.write(hessian_set.json())
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
"""
Defines sets of featurizers to be used by automatminer during featurization.
Featurizer sets are classes with attributes containing lists of featurizers.
For example, the set of all fast structure featurizers could be found with::
StructureFeaturizers().fast
"""
import matminer.featurizers.composition as cf
import matminer.featurizers.structure as sf
import matminer.featurizers.dos as dosf
import matminer.featurizers.bandstructure as bf
try:
import torch
import cgcnn
except ImportError:
torch, cgcnn = None, None
try:
import dscribe
except ImportError:
dscribe = None
__authors__ = ["Alex Dunn", "Alex Ganose"]
class FeaturizerSet:
"""Abstract class for defining sets of featurizers.
All FeaturizerSets should implement at least two sets of featurizers, best
and all. The set of best featurizers should contain those featurizers
that balance speed, applicability and usefulness. This should be determined
by the implementor.
Each set returned is a list of matminer featurizer objects.
Args:
exclude (list of str, optional): A list of featurizer class names that
will be excluded from the set of featurizers returned.
"""
def __init__(self, exclude=None):
self.exclude = exclude if exclude else []
def __call__(self, *args, **kwargs):
return self.all
@property
def best(self):
"""List of featurizers providing useful features in a reasonable time.
Featurizers that take a very long time to run, which crash for many
systems, or which produce a large number of similar features will be
excluded.
"""
raise NotImplementedError("This featurizer set must return a set of "
"best featurizers")
@property
def all(self):
"""All featurizers available for this featurization type."""
raise NotImplementedError("This featurizer set must return a set of "
"all featurizers")
@property
def fast(self):
"""Fast featurizers available for this featurization type."""
raise NotImplementedError("This featurizer set must return a set of "
"fast featurizers")
def _get_featurizers(self, featurizers):
"""Utility function for getting featurizers not in the ignore list."""
return [f for f in featurizers
if f.__class__.__name__ not in self.exclude]
class AllFeaturizers(FeaturizerSet):
"""Featurizer set containing all available featurizers.
This class provides subsets for composition, structure, density of states
and band structure based featurizers. Additional sets containing all
featurizers and the set of best featurizers are provided.
Example usage::
composition_featurizers = AllFeaturizers().composition
Args:
exclude (list of str, optional): A list of featurizer class names that
will be excluded from the set of featurizers returned.
"""
def __init__(self, exclude=None):
super(AllFeaturizers, self).__init__(exclude=exclude)
self._featurizer_sets = {
'comp': CompositionFeaturizers(),
'struct': StructureFeaturizers(),
'bs': BSFeaturizers(),
'dos': DOSFeaturizers()
}
@property
def composition(self):
"""List of all composition based featurizers."""
return self._get_featurizers(self._featurizer_sets['comp'].all)
@property
def structure(self):
"""List of all structure based featurizers."""
return self._get_featurizers(self._featurizer_sets['struct'].all)
@property
def bandstructure(self):
"""List of all band structure based featurizers."""
return self._get_featurizers(self._featurizer_sets['bs'].all)
@property
def dos(self):
"""List of all density of states based featurizers."""
return self._get_featurizers(self._featurizer_sets['dos'].all)
@property
def all(self):
        featurizers = [f for fset in self._featurizer_sets.values() for f in fset.all]
        return self._get_featurizers(featurizers)
@property
def best(self):
        featurizers = [f for fset in self._featurizer_sets.values() for f in fset.best]
        return self._get_featurizers(featurizers)
@property
def fast(self):
        featurizers = [f for fset in self._featurizer_sets.values() for f in fset.fast]
        return self._get_featurizers(featurizers)
class CompositionFeaturizers(FeaturizerSet):
"""Featurizer set containing composition featurizers.
This class provides subsets for featurizers that require the composition
to have oxidation states, as well as fast, and slow featurizers. Additional
sets containing all featurizers and the set of best featurizers are
provided.
Example usage::
fast_featurizers = CompositionFeaturizers().fast
Args:
exclude (list of str, optional): A list of featurizer class names that
will be excluded from the set of featurizers returned.
"""
def __init__(self, exclude=None):
super(CompositionFeaturizers, self).__init__(exclude=exclude)
self._fast_featurizers = [
cf.AtomicOrbitals(),
cf.ElementProperty.from_preset("matminer"),
cf.ElementProperty.from_preset("magpie"),
cf.ElementProperty.from_preset("matscholar_el"),
cf.ElementProperty.from_preset("deml"),
cf.ElementFraction(),
cf.Stoichiometry(),
cf.TMetalFraction(),
cf.BandCenter(),
cf.ValenceOrbital()
]
self._slow_featurizers = [
cf.Miedema(),
cf.AtomicPackingEfficiency(), # slower than the rest
cf.CohesiveEnergy() # requires mpid present
]
self._need_oxi_featurizers = [
cf.YangSolidSolution(),
cf.CationProperty.from_preset(preset_name='deml'),
cf.OxidationStates.from_preset(preset_name='deml'),
cf.ElectronAffinity(),
cf.ElectronegativityDiff(),
cf.IonProperty()
]
self._intermetallics_only = [
cf.YangSolidSolution(),
cf.Miedema(),
]
@property
def intermetallics_only(self):
"""List of featurizers that applies only to intermetallics.
Will probably be removed by valid_fraction checking if not actally
applicable to the dataset.
"""
return self._get_featurizers(self._intermetallics_only)
@property
def fast(self):
"""List of featurizers that are generally quick to featurize."""
return self._get_featurizers(self._fast_featurizers)
@property
def slow(self):
"""List of featurizers that are generally slow to featurize."""
return self._get_featurizers(self._slow_featurizers)
@property
def need_oxi(self):
"""Featurizers that require the composition to have oxidation states.
If the composition is not decorated with oxidation states the
oxidation states will be guessed. This can cause a significant increase
in featurization time.
"""
return self._get_featurizers(self._need_oxi_featurizers)
@property
def all(self):
"""List of all composition based featurizers."""
return self.fast + self.need_oxi + self.slow
@property
def best(self):
return self.fast + self.intermetallics_only
class StructureFeaturizers(FeaturizerSet):
"""Featurizer set containing structure featurizers.
This class provides subsets for featurizers that require fitting,
return matrices rather than vectors, and produce many features, as well as
fast, and slow featurizers. Additional sets containing all featurizers and
the set of best featurizers are provided.
Example usage::
fast_featurizers = StructureFeaturizers().fast
Args:
exclude (list of str, optional): A list of featurizer class names that
will be excluded from the set of featurizers returned.
"""
def __init__(self, exclude=None):
super(StructureFeaturizers, self).__init__(exclude=exclude)
self._fast_featurizers = [
sf.DensityFeatures(),
sf.GlobalSymmetryFeatures(),
sf.EwaldEnergy(),
sf.CoulombMatrix(flatten=True),
sf.SineCoulombMatrix(flatten=True)
]
ssf = sf.SiteStatsFingerprint
self._slow_featurizers = [
ssf.from_preset('CrystalNNFingerprint_ops'),
ssf.from_preset("BondLength-dejong2016"),
ssf.from_preset("BondAngle-dejong2016"),
ssf.from_preset("Composition-dejong2016_SD"),
ssf.from_preset("Composition-dejong2016_AD"),
ssf.from_preset("CoordinationNumber_ward-prb-2017"),
ssf.from_preset("LocalPropertyDifference_ward-prb-2017"),
sf.ChemicalOrdering(),
sf.StructuralHeterogeneity(),
sf.MaximumPackingEfficiency(),
sf.XRDPowderPattern(),
sf.Dimensionality(),
sf.OrbitalFieldMatrix(flatten=True),
sf.JarvisCFID(),
]
# Prevent import errors
self._require_external = []
if torch and cgcnn:
self._require_external.append(sf.CGCNNFeaturizer())
if dscribe:
self._require_external.append(sf.SOAP())
self._need_fitting_featurizers = [
sf.PartialRadialDistributionFunction(),
sf.BondFractions(),
sf.BagofBonds(coulomb_matrix=sf.CoulombMatrix()),
sf.BagofBonds(coulomb_matrix=sf.SineCoulombMatrix()),
]
self._matrix_featurizers = [
sf.RadialDistributionFunction(), # returns dict
sf.MinimumRelativeDistances(), # returns a list
sf.ElectronicRadialDistributionFunction()
]
        # largely overlaps with _need_fitting_featurizers
self._many_features_featurizers = [
sf.PartialRadialDistributionFunction(),
sf.BondFractions(approx_bonds=False),
sf.BagofBonds(coulomb_matrix=sf.CoulombMatrix()),
sf.BagofBonds(coulomb_matrix=sf.SineCoulombMatrix()),
sf.OrbitalFieldMatrix(flatten=True),
sf.JarvisCFID()
]
@property
def fast(self):
"""List of featurizers that are generally fast to featurize."""
return self._get_featurizers(self._fast_featurizers)
@property
def slow(self):
"""List of featurizers that are generally slow to featurize."""
return self._get_featurizers(self._slow_featurizers)
@property
def need_fit(self):
"""List of featurizers which must be fit before featurizing.
Fitting can be performed using the `Featurizer.fit()` method.
Alternatively, the `Featurizer.fit_featurize_dataframe()` can be used
to fit and featurize simultaneously.
"""
return self._get_featurizers(self._need_fitting_featurizers)
@property
def matrix(self):
"""List of featurizers that return matrices as features.
These featurizers are not useful for vectorized representations of
crystal structures.
"""
return self._get_featurizers(self._matrix_featurizers)
@property
def many_features(self):
"""List of featurizers that return many features."""
return self._get_featurizers(self._many_features_featurizers)
@property
def require_external(self):
"""Featurizers which require external software not installable via
Pypi
"""
return self._get_featurizers(self._require_external)
@property
def all_vector(self):
return self.fast + self.slow + self.need_fit + self.require_external
@property
def all(self):
return self.all_vector
@property
def all_including_matrix(self):
"""List of all structure based featurizers."""
return self.all_vector + self.matrix
@property
def best(self):
return self.fast + self.slow + self.require_external
class DOSFeaturizers(FeaturizerSet):
"""Featurizer set containing density of states featurizers.
    This class provides the set of all featurizers and the set of best featurizers.
Example usage::
dos_featurizers = DOSFeaturizers().best
Args:
exclude (list of str, optional): A list of featurizer class names that
will be excluded from the set of featurizers returned.
"""
def __init__(self, exclude=None):
super(DOSFeaturizers, self).__init__(exclude=exclude)
# Best featurizers work on the entire DOS
self._best_featurizers = [
dosf.DOSFeaturizer(),
dosf.DopingFermi(),
dosf.Hybridization(),
dosf.DosAsymmetry()
]
self._site_featurizers = [dosf.SiteDOS()]
@property
def all(self):
"""List of all density of states based featurizers."""
return self.best + self.site
@property
def best(self):
return self._get_featurizers(self._best_featurizers)
@property
def fast(self):
return self._get_featurizers(self._best_featurizers)
@property
def site(self):
return self._get_featurizers(self._site_featurizers)
class BSFeaturizers(FeaturizerSet):
"""Featurizer set containing band structure featurizers.
    This class provides the set of all featurizers and the set of best featurizers.
Example usage::
bs_featurizers = BSFeaturizers().best
Args:
exclude (list of str, optional): A list of featurizer class names that
will be excluded from the set of featurizers returned.
"""
def __init__(self, exclude=None):
super(BSFeaturizers, self).__init__(exclude=exclude)
self._best_featurizers = [
bf.BandFeaturizer(),
bf.BranchPointEnergy(),
]
@property
def all(self):
"""List of all band structure based featurizers."""
return self.best
@property
def best(self):
return self._get_featurizers(self._best_featurizers)
@property
def fast(self):
return self._get_featurizers(self._best_featurizers)
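# Example of the exclude mechanism shared by all sets above (hypothetical usage):
#   featurizers = StructureFeaturizers(exclude=['JarvisCFID']).slow
#   assert all(f.__class__.__name__ != 'JarvisCFID' for f in featurizers)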
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Natural atomic orbitals
Ref:
F. Weinhold et al., J. Chem. Phys. 83(1985), 735-746
'''
import sys
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.gto import mole
from pyscf.lo import orth
from pyscf.lib import logger
# Note the valence space for Li, Be may need include 2p, Al..Cl may need 3d ...
AOSHELL = [
# This is No. of shells, not the atomic configuations
# core core+valence
# core+valence = lambda nuc, l: \
# int(numpy.ceil(pyscf.lib.parameters.ELEMENTS[nuc][2][l]/(4*l+2.)))
['0s0p0d0f', '0s0p0d0f'], # 0 GHOST
['0s0p0d0f', '1s0p0d0f'], # 1 H
['0s0p0d0f', '1s0p0d0f'], # 2 He
['1s0p0d0f', '2s0p0d0f'], # 3 Li
['1s0p0d0f', '2s0p0d0f'], # 4 Be
['1s0p0d0f', '2s1p0d0f'], # 5 B
['1s0p0d0f', '2s1p0d0f'], # 6 C
['1s0p0d0f', '2s1p0d0f'], # 7 N
['1s0p0d0f', '2s1p0d0f'], # 8 O
['1s0p0d0f', '2s1p0d0f'], # 9 F
['1s0p0d0f', '2s1p0d0f'], # 10 Ne
['2s1p0d0f', '3s1p0d0f'], # 11 Na
['2s1p0d0f', '3s1p0d0f'], # 12 Mg
['2s1p0d0f', '3s2p0d0f'], # 13 Al
['2s1p0d0f', '3s2p0d0f'], # 14 Si
['2s1p0d0f', '3s2p0d0f'], # 15 P
['2s1p0d0f', '3s2p0d0f'], # 16 S
['2s1p0d0f', '3s2p0d0f'], # 17 Cl
['2s1p0d0f', '3s2p0d0f'], # 18 Ar
['3s2p0d0f', '4s2p0d0f'], # 19 K
['3s2p0d0f', '4s2p0d0f'], # 20 Ca
['3s2p0d0f', '4s2p1d0f'], # 21 Sc
['3s2p0d0f', '4s2p1d0f'], # 22 Ti
['3s2p0d0f', '4s2p1d0f'], # 23 V
['3s2p0d0f', '4s2p1d0f'], # 24 Cr
['3s2p0d0f', '4s2p1d0f'], # 25 Mn
['3s2p0d0f', '4s2p1d0f'], # 26 Fe
['3s2p0d0f', '4s2p1d0f'], # 27 Co
['3s2p0d0f', '4s2p1d0f'], # 28 Ni
['3s2p0d0f', '4s2p1d0f'], # 29 Cu
['3s2p0d0f', '4s2p1d0f'], # 30 Zn
['3s2p1d0f', '4s3p1d0f'], # 31 Ga
['3s2p1d0f', '4s3p1d0f'], # 32 Ge
['3s2p1d0f', '4s3p1d0f'], # 33 As
['3s2p1d0f', '4s3p1d0f'], # 34 Se
['3s2p1d0f', '4s3p1d0f'], # 35 Br
['3s2p1d0f', '4s3p1d0f'], # 36 Kr
['4s3p1d0f', '5s3p1d0f'], # 37 Rb
['4s3p1d0f', '5s3p1d0f'], # 38 Sr
['4s3p1d0f', '5s3p2d0f'], # 39 Y
['4s3p1d0f', '5s3p2d0f'], # 40 Zr
['4s3p1d0f', '5s3p2d0f'], # 41 Nb
['4s3p1d0f', '5s3p2d0f'], # 42 Mo
['4s3p1d0f', '5s3p2d0f'], # 43 Tc
['4s3p1d0f', '5s3p2d0f'], # 44 Ru
['4s3p1d0f', '5s3p2d0f'], # 45 Rh
['4s3p1d0f', '4s3p2d0f'], # 46 Pd
['4s3p1d0f', '5s3p2d0f'], # 47 Ag
['4s3p1d0f', '5s3p2d0f'], # 48 Cd
['4s3p2d0f', '5s4p2d0f'], # 49 In
['4s3p2d0f', '5s4p2d0f'], # 50 Sn
['4s3p2d0f', '5s4p2d0f'], # 51 Sb
['4s3p2d0f', '5s4p2d0f'], # 52 Te
['4s3p2d0f', '5s4p2d0f'], # 53 I
['4s3p2d0f', '5s4p2d0f'], # 54 Xe
['5s4p2d0f', '6s4p2d0f'], # 55 Cs
['5s4p2d0f', '6s4p2d0f'], # 56 Ba
['5s4p2d0f', '6s4p3d0f'], # 57 La
['5s4p2d0f', '6s4p3d1f'], # 58 Ce
['5s4p2d0f', '6s4p2d1f'], # 59 Pr
['5s4p2d0f', '6s4p2d1f'], # 60 Nd
['5s4p2d0f', '6s4p2d1f'], # 61 Pm
['5s4p2d0f', '6s4p2d1f'], # 62 Sm
['5s4p2d0f', '6s4p2d1f'], # 63 Eu
['5s4p2d0f', '6s4p3d1f'], # 64 Gd
['5s4p2d0f', '6s4p3d1f'], # 65 Tb
['5s4p2d0f', '6s4p2d1f'], # 66 Dy
['5s4p2d0f', '6s4p2d1f'], # 67 Ho
['5s4p2d0f', '6s4p2d1f'], # 68 Er
['5s4p2d0f', '6s4p2d1f'], # 69 Tm
['5s4p2d0f', '6s4p2d1f'], # 70 Yb
['5s4p2d1f', '6s4p3d1f'], # 71 Lu
['5s4p2d1f', '6s4p3d1f'], # 72 Hf
['5s4p2d1f', '6s4p3d1f'], # 73 Ta
['5s4p2d1f', '6s4p3d1f'], # 74 W
['5s4p2d1f', '6s4p3d1f'], # 75 Re
['5s4p2d1f', '6s4p3d1f'], # 76 Os
['5s4p2d1f', '6s4p3d1f'], # 77 Ir
['5s4p2d1f', '6s4p3d1f'], # 78 Pt
['5s4p2d1f', '6s4p3d1f'], # 79 Au
['5s4p2d1f', '6s4p3d1f'], # 80 Hg
['5s4p3d1f', '6s5p3d1f'], # 81 Tl
['5s4p3d1f', '6s5p3d1f'], # 82 Pb
['5s4p3d1f', '6s5p3d1f'], # 83 Bi
['5s4p3d1f', '6s5p3d1f'], # 84 Po
['5s4p3d1f', '6s5p3d1f'], # 85 At
['5s4p3d1f', '6s5p3d1f'], # 86 Rn
['6s5p3d1f', '7s5p3d1f'], # 87 Fr
['6s5p3d1f', '7s5p3d1f'], # 88 Ra
['6s5p3d1f', '7s5p4d1f'], # 89 Ac
['6s5p3d1f', '7s5p4d1f'], # 90 Th
['6s5p3d1f', '7s5p4d2f'], # 91 Pa
['6s5p3d1f', '7s5p4d2f'], # 92 U
['6s5p3d1f', '7s5p4d2f'], # 93 Np
['6s5p3d1f', '7s5p3d2f'], # 94 Pu
['6s5p3d1f', '7s5p3d2f'], # 95 Am
['6s5p3d1f', '7s5p4d2f'], # 96 Cm
['6s5p3d1f', '7s5p4d2f'], # 97 Bk
['6s5p3d1f', '7s5p3d2f'], # 98 Cf
['6s5p3d1f', '7s5p3d2f'], # 99 Es
['6s5p3d1f', '7s5p3d2f'], #100 Fm
['6s5p3d1f', '7s5p3d2f'], #101 Md
['6s5p3d1f', '7s5p3d2f'], #102 No
['6s5p3d2f', '7s5p4d2f'], #103 Lr
['6s5p3d2f', '7s5p4d2f'], #104 Rf
['6s5p3d2f', '7s5p4d2f'], #105 Db
['6s5p3d2f', '7s5p4d2f'], #106 Sg
['6s5p3d2f', '7s5p4d2f'], #107 Bh
['6s5p3d2f', '7s5p4d2f'], #108 Hs
['6s5p3d2f', '7s5p4d2f'], #109 Mt
['6s5p3d2f', '7s5p4d2f'], #110 E110
['6s5p3d2f', '7s5p4d2f'], #111 E111
['6s5p3d2f', '7s5p4d2f'], #112 E112
['6s5p4d2f', '7s6p4d2f'], #113 E113
['6s5p4d2f', '7s6p4d2f'], #114 E114
['6s5p4d2f', '7s6p4d2f'], #115 E115
['6s5p4d2f', '7s6p4d2f'], #116 E116
['6s3p4d2f', '7s6p4d2f'], #117 E117
['6s3p4d2f', '7s6p4d2f'] #118 E118
]
def prenao(mol, dm):
s = mol.intor_symmetric('int1e_ovlp')
p = reduce(numpy.dot, (s, dm, s))
return _prenao_sub(mol, p, s)[1]
def nao(mol, mf, s=None, restore=True):
if s is None:
s = mol.intor_symmetric('int1e_ovlp')
dm = mf.make_rdm1()
p = reduce(numpy.dot, (s, dm, s))
pre_occ, pre_nao = _prenao_sub(mol, p, s)
cnao = _nao_sub(mol, pre_occ, pre_nao)
if restore:
# restore natural character
p_nao = reduce(numpy.dot, (cnao.T, p, cnao))
s_nao = numpy.eye(p_nao.shape[0])
cnao = numpy.dot(cnao, _prenao_sub(mol, p_nao, s_nao)[1])
return cnao
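# nao() proceeds in three steps: (1) diagonalize the spherically averaged
# density within each (atom, angular momentum) block to get pre-NAOs,
# (2) orthogonalize the core/valence/Rydberg subspaces in _nao_sub, and
# (3) optionally re-diagonalize in the NAO basis to restore natural character.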
def _prenao_sub(mol, p, s):
ao_loc = mol.ao_loc_nr()
nao = ao_loc[-1]
occ = numpy.zeros(nao)
cao = numpy.zeros((nao,nao), dtype=s.dtype)
bas_ang = mol._bas[:,mole.ANG_OF]
for ia, (b0,b1,p0,p1) in enumerate(mol.aoslice_by_atom(ao_loc)):
l_max = bas_ang[b0:b1].max()
for l in range(l_max+1):
idx = []
for ib in numpy.where(bas_ang[b0:b1] == l)[0]:
idx.append(numpy.arange(ao_loc[b0+ib], ao_loc[b0+ib+1]))
idx = numpy.hstack(idx)
if idx.size < 1:
continue
if mol.cart:
degen = (l + 1) * (l + 2) // 2
else:
degen = l * 2 + 1
p_frag = _spheric_average_mat(p, l, idx, degen)
s_frag = _spheric_average_mat(s, l, idx, degen)
e, v = scipy.linalg.eigh(p_frag, s_frag)
e = e[::-1]
v = v[:,::-1]
idx = idx.reshape(-1,degen)
for k in range(degen):
ilst = idx[:,k]
occ[ilst] = e
for i,i0 in enumerate(ilst):
cao[i0,ilst] = v[i]
return occ, cao
def _nao_sub(mol, pre_occ, pre_nao, s=None):
if s is None:
s = mol.intor_symmetric('int1e_ovlp')
core_lst, val_lst, rydbg_lst = _core_val_ryd_list(mol)
nbf = mol.nao_nr()
pre_nao = pre_nao.astype(s.dtype)
cnao = numpy.empty((nbf,nbf), dtype=s.dtype)
if core_lst:
c = pre_nao[:,core_lst].copy()
s1 = reduce(lib.dot, (c.conj().T, s, c))
cnao[:,core_lst] = c1 = lib.dot(c, orth.lowdin(s1))
c = pre_nao[:,val_lst].copy()
c -= reduce(lib.dot, (c1, c1.conj().T, s, c))
else:
c = pre_nao[:,val_lst]
if val_lst:
s1 = reduce(lib.dot, (c.conj().T, s, c))
wt = pre_occ[val_lst]
cnao[:,val_lst] = lib.dot(c, orth.weight_orth(s1, wt))
if rydbg_lst:
cvlst = core_lst + val_lst
c1 = cnao[:,cvlst].copy()
c = pre_nao[:,rydbg_lst].copy()
c -= reduce(lib.dot, (c1, c1.conj().T, s, c))
s1 = reduce(lib.dot, (c.conj().T, s, c))
cnao[:,rydbg_lst] = lib.dot(c, orth.lowdin(s1))
snorm = numpy.linalg.norm(reduce(lib.dot, (cnao.conj().T, s, cnao)) - numpy.eye(nbf))
if snorm > 1e-9:
logger.warn(mol, 'Weak orthogonality for localized orbitals %s', snorm)
return cnao
def _core_val_ryd_list(mol):
from pyscf.gto.ecp import core_configuration
count = numpy.zeros((mol.natm, 9), dtype=int)
core_lst = []
val_lst = []
rydbg_lst = []
k = 0
for ib in range(mol.nbas):
ia = mol.bas_atom(ib)
# Avoid calling mol.atom_charge because we should include ECP core electrons here
nuc = mole._charge(mol.atom_symbol(ia))
l = mol.bas_angular(ib)
nc = mol.bas_nctr(ib)
symb = mol.atom_symbol(ia)
nelec_ecp = mol.atom_nelec_core(ia)
ecpcore = core_configuration(nelec_ecp)
coreshell = [int(x) for x in AOSHELL[nuc][0][::2]]
cvshell = [int(x) for x in AOSHELL[nuc][1][::2]]
if mol.cart:
deg = (l + 1) * (l + 2) // 2
else:
deg = 2 * l + 1
for n in range(nc):
if l > 3:
rydbg_lst.extend(range(k, k+deg))
elif ecpcore[l]+count[ia,l]+n < coreshell[l]:
core_lst.extend(range(k, k+deg))
elif ecpcore[l]+count[ia,l]+n < cvshell[l]:
val_lst.extend(range(k, k+deg))
else:
rydbg_lst.extend(range(k, k+deg))
k = k + deg
count[ia,l] += nc
return core_lst, val_lst, rydbg_lst
def _spheric_average_mat(mat, l, lst, degen=None):
if degen is None:
degen = l * 2 + 1
nd = len(lst) // degen
mat_frag = mat[lst][:,lst].reshape(nd,degen,nd,degen)
return numpy.einsum('imjn->ij', mat_frag) / degen
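# _spheric_average_mat contracts the matrix block of an (atom, l) shell over
# its degenerate components (2l+1 spherical or (l+1)(l+2)/2 cartesian
# functions), so e.g. the three p functions of a shell enter the pre-NAO
# eigenproblem as a single averaged radial block.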
def set_atom_conf(element, description):
'''Change the default atomic core and valence configuration to the one
given by "description".
See lo.nao.AOSHELL for the default configuration.
Args:
element : str or int
Element symbol or nuclear charge
description : str or a list of str
| "double p" : double p shell
| "double d" : double d shell
| "double f" : double f shell
| "polarize" : add one polarized shell
| "1s1d" : keep core unchanged and set 1 s 1 d shells for valence
| ("3s2p","1d") : 3 s, 2 p shells for core and 1 d shells for valence
'''
charge = mole._charge(element)
def to_conf(desc):
desc = desc.replace(' ','').replace('-','').replace('_','').lower()
if "doublep" in desc:
desc = '2p'
elif "doubled" in desc:
desc = '2d'
elif "doublef" in desc:
desc = '2f'
elif "polarize" in desc:
loc = AOSHELL[charge][1].find('0')
desc = '1' + AOSHELL[charge][1][loc+1]
return desc
if isinstance(description, str):
c_desc, v_desc = AOSHELL[charge][0], to_conf(description)
else:
c_desc, v_desc = to_conf(description[0]), to_conf(description[1])
ncore = [int(x) for x in AOSHELL[charge][0][::2]]
ncv = [int(x) for x in AOSHELL[charge][1][::2]]
for i, s in enumerate(('s', 'p', 'd', 'f')):
if s in c_desc:
ncore[i] = int(c_desc.split(s)[0][-1])
if s in v_desc:
ncv[i] = ncore[i] + int(v_desc.split(s)[0][-1])
c_conf = '%ds%dp%dd%df' % tuple(ncore)
cv_conf = '%ds%dp%dd%df' % tuple(ncv)
AOSHELL[charge] = [c_conf, cv_conf]
sys.stderr.write('Update %s conf: core %s core+valence %s\n' %
(element, c_conf, cv_conf))
if __name__ == "__main__":
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.verbose = 1
mol.output = 'out_nao'
mol.atom.extend([
['O' , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ])
mol.basis = {'H': '6-31g',
'O': '6-31g',}
mol.build()
mf = scf.RHF(mol)
mf.scf()
s = mol.intor_symmetric('int1e_ovlp_sph')
p = reduce(numpy.dot, (s, mf.make_rdm1(), s))
o0, c0 = _prenao_sub(mol, p, s)
print(o0)
print(abs(c0).sum() - 21.848915907988854)
c = nao(mol, mf)
print(reduce(numpy.dot, (c.T, p, c)).diagonal())
print(_core_val_ryd_list(mol))
set_atom_conf('Fe', '1s1d') # core 3s2p0d0f core+valence 4s2p1d0f
set_atom_conf('Fe', 'double d') # core 3s2p0d0f core+valence 4s2p2d0f
set_atom_conf('Fe', 'double p') # core 3s2p0d0f core+valence 4s4p2d0f
set_atom_conf('Fe', 'polarize') # core 3s2p0d0f core+valence 4s4p2d1f
|
nilq/baby-python
|
python
|
import requests
import sys
import os
import re
import csv
from urlparse import urljoin
from bs4 import BeautifulSoup
import urllib
from pprint import pprint
class Movies(object):
def __init__(self, args):
self.movies = []
if len(args) == 0:
print 'No Argument given'
#TODO: raise exception or something
return
if args[0] == '-d':
args = self.get_movies_from_directories(args[1:])
self.search_movie_names(args)
self.to_csv()
def get_movies_from_directories(self, dirs):
#TODO: dirs is a list of all directories
#handle errors if directories doesn't exist
#look only for video formats maybe - mp4, avi, etc, etc
result = []
for directory in dirs:
try:
os.chdir(os.path.expanduser(directory))
except Exception as e:
                print e
continue
files = os.listdir('.')
for file_name in files:
if os.path.isfile(file_name):
file_name = re.sub('[.][^.]*$','*', file_name)
result.append(self.__purify_name(file_name))
return result
def __purify_name(self, name):
year_match = re.search('\W([0-9]){4}\W', name)
year = name[year_match.start():year_match.end()] if year_match else ''
name = re.sub('\((.*)\)|\[(.*)\]|\{(.*)\}','', name)
name = re.sub('\W',' ', name)
return name + year
def search_movie_names(self, args):
for item in args:
#TODO: purify_name(item)
search_term = urllib.quote_plus(item)
url = 'http://www.imdb.com/find?q=' + search_term + '&s=all'
bs = BeautifulSoup(requests.get(url).content, "html.parser")
try:
url_new = urljoin(url,bs.find(
'td', attrs={'class':'result_text'}).find('a').get('href'))
movie_dict = self.extract_movie_info(url_new)
except:
print ('No Result Found. searched: ', search_term, item)
movie_dict = self.extract_movie_info()
movie_dict['original_name'] = item
movie_dict['search_term'] = search_term
self.movies.append(movie_dict)
pprint(movie_dict)
return True
def extract_movie_info(self, url=None):
if not url:
return { 'name': '', 'rating': '', 'summary': '', 'genre': '', }
response = requests.get(url).content
bs = BeautifulSoup(response, "html.parser")
name = bs.find('h1', attrs={'itemprop':'name'}).text.encode('utf-8')
try:
rating = bs.find('span', attrs={'itemprop':'ratingValue'}).text
except:
rating = '-'
try:
summary = bs.find('div', attrs={'class':'summary_text'}).text.strip().encode('utf-8')
except:
summary = '-'
try:
genre = bs.find('span', attrs={'itemprop':'genre'}).text.encode('utf-8')
except:
genre = '-'
return {
'name': name,
'rating': rating,
'summary': summary,
'genre': genre,
}
def to_csv(self):
f = csv.writer(open('movies_list.csv', 'wb+'))
f.writerow(['original_name','name', 'rating', 'genre', 'summary'])
for item in self.movies:
f.writerow([item['original_name'], item['name'], item['rating'],
item['genre'], item['summary']])
def main():
obj = Movies(sys.argv[1:])
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
class CondorJob:
def __init__(self, job_id):
self.job_id = job_id
self.state = None
self.execute_machine = None
self.running_time = 0
def reset_state(self):
self.state = None
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import requests
import re
from threading import Thread
import queue
from threading import Semaphore
from lxml import etree
import json
import ssl
prefix = "http://www.cazy.org/"
fivefamilies = ["Glycoside-Hydrolases.html","GlycosylTransferases.html","Polysaccharide-Lyases.html","Carbohydrate-Esterases.html","Carbohydrate-Binding-Modules.html", "Auxiliary-Activities.html"]
#fivefamilies = ["Auxiliary-Activities.html"]
in_queue = queue.Queue()
writeLock = Semaphore(value = 1)
rx_cazypedia = re.compile(r'(http://www\.cazypedia\.org/index\.php/\w+)')
rx_prosite = re.compile(r'(http://prosite\.expasy\.org/\w+)')
re_taxon = re.compile(r'html">(\w+)</a> \((\d+)\)</span>')
family_ec = {}
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
def clean(text):
text = re.sub('<[^<]+?>', '', text)
text = text.replace("β","beta")
text = text.replace("\xce\xb2","beta")
text = text.replace("α","alpha")
text = text.replace("κ","kappa")
text = text.replace("\xce\xb1","alpha")
text = text.replace("\xe2\x80\x98", "'")
text = text.replace("\xe2\x80\x99", "'")
text = text.replace("Å", "angstrom")
text = text.replace("→", "->")
text = text.replace("≥;", ">=")
text = text.replace("“", "\"")
text = text.replace("”", "\"")
text = text.replace("–", "-")
text = text.replace("ß", "beta")
return text.strip()
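# For example (hypothetical input):
#   clean('  <b>β-glucosidase</b> ')  # -> 'beta-glucosidase'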
def work():
while True:
url = in_queue.get()
#try:
container = {}
page = re.sub(r"\s+", " ", requests.get(url, verify=False).content.decode('iso8859-1').replace(r"\n", " "))
#print (url)
tree = etree.HTML(page)
family_name = re.findall(r"http://www\.cazy\.org/(\w+)\.html", url)[0]
container["name"] = family_name
if family_name in family_ec.keys():
container["ec"] =list(family_ec[family_name])
trs = tree.xpath("//tr")
title = ""
#print (trs)
for tr in trs:
headers = etree.HTML(etree.tostring(tr)).xpath("//th")
for header in headers:
inhalt = re.findall(r'>(.+?)</',etree.tostring(header).decode('iso8859-1'))
#print (inhalt)
if len(inhalt) > 0:
title = inhalt[0]
#print etree.tostring(header)
contents = etree.HTML(etree.tostring(tr).decode('iso8859-1')).xpath("//td")
for content in contents:
inhalts = re.findall(r'>(.+)</',etree.tostring(content).decode('iso8859-1'))
if len(inhalts) > 0:
inhalt = clean(inhalts[0])
# inhalt = inhalt.replace("α","alpha")
container[title] = inhalt
#print etree.tostring(content)
#print (container)
#print "hello"
container["distribution"] = {}
for i in re_taxon.findall(page):
taxon, number = i[0], int(i[1])
container["distribution"][taxon] = int(number)
cazypedia = re.findall(rx_cazypedia, page)
if len(cazypedia) > 0:
####there is a bug in cazy webpage about GH117 cazypedia link address
cazypedia_url = cazypedia[0]
cazypedia_url = re.sub(r"_Family_GH(\d+)",r"_Family_\1",cazypedia_url)
cazypedia_content = requests.get(cazypedia_url, verify=False).content.decode('iso8859-1').replace("\n"," ")
search_substrate = re.search(r'<h2> <span class="mw-headline" id="Substrate_specificities">\s+Substrate specificities.+?<p>(.+?)</p> <h2>',cazypedia_content)
#print cazypedia_content
if search_substrate:
inhalt = clean(search_substrate.group(1))
container["substrate_specificity"] = inhalt
#print container["substrate_specificity"]
search_residue = re.search(r'<h2> <span class="mw-headline" id="Catalytic_Residues">\s+Catalytic Residues.+?<p>(.+?)</p> <h2>',cazypedia_content)
#print cazypedia_content
if search_residue:
#print "OK"
inhalt = clean(search_residue.group(1))
container["catalytic_residues"] = inhalt
# print container["catalytic_residues"]
#if len(inhalt) > 0:
# print inhalt[0]
prosite = re.findall(rx_prosite, page)
if len(prosite) > 0:
prosite_content = requests.get(prosite[0], verify=False).content.decode('iso8859-1').replace("\n"," ")
#print prosite_content
search_pattern = re.search(r'<td><strong style="letter-spacing\:3px">(\S+)</strong>', prosite_content)
if search_pattern:
container["prosite_pattern"] = search_pattern.group(1)
regex_pattern = search_pattern.group(1).replace("-","").replace("x",r"\w")
regex_pattern = re.sub(r"\((\d+)\)",r"{\1}",regex_pattern)
regex_pattern = re.sub(r'\((\d+),(\d+)\)',r'{\1,\2}',regex_pattern)
regex_pattern = re.sub(r'\((\d+)\)',r'{\1}',regex_pattern)
#print container["family"]
#print regex_pattern
container["regex_pattern"] = regex_pattern
writeLock.acquire()
container["column"] = "cazy"
print (json.dumps(container))
writeLock.release()
#except:
#print "error " + url
# pass
#finally:
in_queue.task_done()
for i in range(7):
t = Thread(target=work)
t.daemon = True
t.start()
rx_ec = re.compile(r'<a href="http://www.enzyme-database.org/query.php\?ec=(\S+?)">\S+</a></th><td class="ec">\s+(.+?)</table>')
rx_ec_family = re.compile(r'<a href=(\w+)\.html id="separ">\w+</a>')
for family in fivefamilies:
address = prefix + family
page = requests.get(address, verify=False).content.decode('iso8859-1')
for ec in rx_ec.findall(page):
for fa in rx_ec_family.findall(ec[1]):
if fa not in family_ec:
family_ec[fa] = set()
family_ec[fa].add(ec[0])
families = re.findall(r'<option value="(http://www\.cazy\.org/\w+?\.html)">\w+</option>', page)
###go into each family
for family in families:
in_queue.put(family)
#print family_ec
in_queue.join()
|
nilq/baby-python
|
python
|
# execute
# pytest -s test_class.py
def setup_module():
print("setting up MODULE 1")
def teardown_module():
print("tearing down MODULE 1")
class TestClass1():
def setup_method(self):
print(" setting up TestClass1 INSTANCE")
def teardown_method(self):
print(" tearing down TestClass1 INSTANCE")
def test_11(self):
print(" test_11")
pass
def test_12(self):
print(" test_12")
pass
@classmethod
def setup_class(cls):
print(" setting up TestClass1")
@classmethod
def teardown_class(cls):
print(" tearing down TestClass1")
class TestClass2():
def setup_method(self):
print(" setting up TestClass2 INSTANCE")
def teardown_method(self):
print(" tearing down TestClass2 INSTANCE")
def test_21(self):
print(" test_21")
pass
def test_22(self):
print(" test_22")
pass
@classmethod
def setup_class(cls):
print(" setting up TestClass2")
@classmethod
def teardown_class(cls):
print(" tearing down TestClass2")
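# Expected console ordering with `pytest -s test_class.py` (abridged):
#   setting up MODULE 1
#     setting up TestClass1
#       setting up TestClass1 INSTANCE
#         test_11
#       tearing down TestClass1 INSTANCE
#       (test_12 repeats the instance setup/teardown)
#     tearing down TestClass1
#   (TestClass2 follows the same pattern)
#   tearing down MODULE 1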
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import codecs
import json
class JsonLinesPipeline(object):
    """Base pipeline: writes each scraped item as one JSON line to output_path.
    Subclasses only set output_path; the class names are kept so existing
    ITEM_PIPELINES entries continue to work.
    """
    output_path = None
    def __init__(self):
        self.file = codecs.open(self.output_path, 'w', encoding='utf-8')
    def process_item(self, item, spider):
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item
    # Scrapy calls close_spider() on item pipelines when the spider finishes;
    # a method named spider_closed() is never invoked automatically.
    def close_spider(self, spider):
        self.file.close()
class MovieItemPipeline(JsonLinesPipeline):
    output_path = './data/movie_item.json'
################ People ####################
class MovieXPeoplePipeline1040(JsonLinesPipeline):
    output_path = './data/movie_Xpeople1040.json'
class MoviePeoplePipeline5000(JsonLinesPipeline):
    output_path = './data/movie_people5000.json'
class MoviePeoplePipeline10000(JsonLinesPipeline):
    output_path = './data/movie_people10000.json'
class MoviePeoplePipeline15000(JsonLinesPipeline):
    output_path = './data/movie_people15000.json'
class MoviePeoplePipeline20000(JsonLinesPipeline):
    output_path = './data/movie_people20000.json'
class MoviePeoplePipeline25000(JsonLinesPipeline):
    output_path = './data/movie_people25000.json'
class MoviePeoplePipeline30000(JsonLinesPipeline):
    output_path = './data/movie_people30000.json'
class MoviePeoplePipeline35000(JsonLinesPipeline):
    output_path = './data/movie_people35000.json'
class MoviePeoplePipeline40000(JsonLinesPipeline):
    output_path = './data/movie_people40000.json'
################ Comment ####################
class MovieCommentPipeline20(JsonLinesPipeline):
    output_path = './data/movie_comment20.json'
class MovieCommentPipeline40(JsonLinesPipeline):
    output_path = './data/movie_comment40.json'
class MovieCommentPipeline60(JsonLinesPipeline):
    output_path = './data/movie_comment60.json'
class MovieCommentPipeline80(JsonLinesPipeline):
    output_path = './data/movie_comment80.json'
class MovieCommentPipeline100(JsonLinesPipeline):
    output_path = './data/movie_comment100.json'
class MovieCommentPipeline120(JsonLinesPipeline):
    output_path = './data/movie_comment120.json'
class MovieCommentPipeline140(JsonLinesPipeline):
    output_path = './data/movie_comment140.json'
class MovieCommentPipeline160(JsonLinesPipeline):
    output_path = './data/movie_comment160.json'
class MovieCommentPipeline180(JsonLinesPipeline):
    output_path = './data/movie_comment180.json'
class MovieCommentPipeline200(JsonLinesPipeline):
    output_path = './data/movie_comment200.json'
class MovieCommentPipeline225(JsonLinesPipeline):
    output_path = './data/movie_comment225.json'
class MovieCommentPipeline250(JsonLinesPipeline):
    output_path = './data/movie_comment250.json'
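# A minimal sketch of enabling these pipelines in settings.py; the "movie"
# project/module path below is an assumption, not taken from this file:
# ITEM_PIPELINES = {
#     'movie.pipelines.MovieItemPipeline': 300,
#     'movie.pipelines.MovieCommentPipeline20': 310,
# }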
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from osc_lib.i18n import _
from tripleoclient import command
from tripleoclient import constants
from tripleoclient import utils
class DownloadConfig(command.Command):
"""Download Overcloud Config"""
log = logging.getLogger(__name__ + ".DownloadConfig")
def get_parser(self, prog_name):
parser = super(DownloadConfig, self).get_parser(prog_name)
parser.add_argument(
'--name',
dest='name',
default='overcloud',
help=_('The name of the plan, which is used for the object '
'storage container, workflow environment and orchestration '
'stack names.'),
)
parser.add_argument(
'--config-dir',
dest='config_dir',
default=os.path.join(
constants.CLOUD_HOME_DIR,
'tripleo-config'
),
help=_('The directory where the configuration files will be '
'pushed'),
)
        parser.add_argument(
            '--config-type',
            dest='config_type',
            action='append',
            default=None,
            help=_('Type of object config to be extracted from the '
                   'deployment. May be specified multiple times; defaults '
                   'to all keys available'),
        )
parser.add_argument(
'--no-preserve-config',
dest='preserve_config_dir',
action='store_false',
default=True,
help=('If specified, will delete and recreate the --config-dir '
'if it already exists. Default is to use the existing dir '
'location and overwrite files. Files in --config-dir not '
'from the stack will be preserved by default.')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
name = parsed_args.name
config_dir = os.path.abspath(parsed_args.config_dir)
config_type = parsed_args.config_type
preserve_config_dir = parsed_args.preserve_config_dir
extra_vars = {'plan': name,
'config_dir': config_dir,
'preserve_config': preserve_config_dir}
if config_type:
extra_vars['config_type'] = config_type
with utils.TempDirs() as tmp:
utils.run_ansible_playbook(
playbook='cli-config-download-export.yaml',
inventory='localhost,',
workdir=tmp,
playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
verbosity=utils.playbook_verbosity(self=self),
extra_vars=extra_vars)
print("The TripleO configuration has been successfully generated "
"into: {0}".format(config_dir))
|
nilq/baby-python
|
python
|
"""Middleware used by Reversion."""
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from reversion.revisions import revision_context_manager
REVISION_MIDDLEWARE_FLAG = "reversion.revision_middleware_active"
class RevisionMiddleware(object):
"""Wraps the entire request in a revision."""
def process_request(self, request):
"""Starts a new revision."""
if request.META.get(REVISION_MIDDLEWARE_FLAG, False):
raise ImproperlyConfigured("RevisionMiddleware can only be included in MIDDLEWARE_CLASSES once.")
request.META[REVISION_MIDDLEWARE_FLAG] = True
revision_context_manager.start()
def _close_revision(self, request):
"""Closes the revision."""
if request.META.get(REVISION_MIDDLEWARE_FLAG, False):
del request.META[REVISION_MIDDLEWARE_FLAG]
revision_context_manager.end()
def process_response(self, request, response):
"""Closes the revision."""
# look to see if the session has been accessed before looking for user to stop Vary: Cookie
if hasattr(request, 'session') and request.session.accessed \
and hasattr(request, "user") and request.user is not None and request.user.is_authenticated() \
and revision_context_manager.is_active():
revision_context_manager.set_user(request.user)
self._close_revision(request)
return response
def process_exception(self, request, exception):
"""Closes the revision."""
revision_context_manager.invalidate()
self._close_revision(request)
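# A minimal settings sketch (assuming an old-style Django project and that
# this module lives at reversion/middleware.py); the middleware must appear
# exactly once, as enforced by the flag above:
# MIDDLEWARE_CLASSES = (
#     "django.contrib.sessions.middleware.SessionMiddleware",
#     "django.contrib.auth.middleware.AuthenticationMiddleware",
#     "reversion.middleware.RevisionMiddleware",
# )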
|
nilq/baby-python
|
python
|
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Union
from loguru import logger
from dome9 import BaseDataclassRequest, APIUtils, Dome9Resource, Client
from dome9.consts import NewGroupBehaviors
from dome9.exceptions import UnsupportedCloudAccountCredentialsBasedType, UnsupportedCloudAccountGroupBehaviors
class AwsCloudAccountConsts(Enum):
CLOUD_ACCOUNTS = 'CloudAccounts'
REGION_CONFIG = 'region-conf'
ORGANIZATIONAL_UNIT = 'organizationalUnit'
NAME = 'name'
CREDENTIALS = 'credentials'
class AwsCloudAccountCredentialsBasedType(Enum):
USER_BASED = 'UserBased'
ROLE_BASED = 'RoleBased'
@dataclass
class CloudAccountCredentials:
"""The information needed for Dome9 System in order to connect to the AWS cloud account
:link https://api-v2-docs.dome9.com/#schemadome9-web-api-models-cloudaccountcredentialsviewmodel
:param arn: [Required] AWS Role ARN (to be assumed by Dome9 System)
:type arn: str
:param secret: [Required] The AWS role External ID (Dome9 System will have to use this secret in order to assume the role)
:type secret: str
    :param type: [Required] The cloud account onboarding method. Should be set to "RoleBased" as other methods are deprecated
    :type type: str
"""
arn: str
secret: str
type: str = AwsCloudAccountCredentialsBasedType.ROLE_BASED.value
@logger.catch(reraise=True)
def __post_init__(self):
type_options = [type_option.value for type_option in AwsCloudAccountCredentialsBasedType]
if self.type not in type_options:
raise UnsupportedCloudAccountCredentialsBasedType(f'base type must be one of the following {type_options}')
@dataclass
class CloudAccount(BaseDataclassRequest):
"""The new AWS account data
:link https://api-v2-docs.dome9.com/#schemadome9-web-api-models-cloudaccountviewmodel
:param name: The cloud account name
:type name: str
:param credentials: [Required] The information needed for Dome9 System in order to connect to the AWS cloud account
:type credentials: CloudAccountCredentials
:param organizational_unit_id:
:type organizational_unit_id: str
"""
name: str
credentials: CloudAccountCredentials
organizational_unit_id: str = None
@dataclass
class CloudAccountRegionConfiguration:
"""AWS cloud account net sec region
:link https://api-v2-docs.dome9.com/#schemadome9-web-api-models-cloudaccountregionconfigurationviewmodel
:param region: Dome9 representation value for the AWS region
:type region: str
:param new_group_behavior: The Protection Mode that Dome9 will apply to new security groups detected in the cloud account. ReadOnly New Security Groups will be included in Dome9 in Read-Only mode, without changes to any of the rules FullManage New Security Groups will be included in Dome9 in Full Protection mode, without changes to any of the rules Reset New Security Groups will be included in Dome9 in Full Protection mode, and all inbound and outbound rules will be cleared
:type new_group_behavior: str
"""
region: str
new_group_behavior: str
@logger.catch(reraise=True)
def __post_init__(self):
APIUtils.check_is_valid_aws_region_id(self.region)
new_group_behaviors = [new_group_behavior.value for new_group_behavior in NewGroupBehaviors]
if self.new_group_behavior not in new_group_behaviors:
raise UnsupportedCloudAccountGroupBehaviors(f'new group behaviors must be one of the following {new_group_behaviors}')
@dataclass
class AwsCloudAccountUpdateName(BaseDataclassRequest):
"""AWS cloud account update name
:link https://api-v2-docs.dome9.com/#cloudaccounts_updatecloudaccountname
:param cloud_account_id:
:type cloud_account_id: str
    :param data: the new name for the account
    :type data: str
"""
cloud_account_id: str
data: str
@dataclass
class CloudAccountRegionConfigurationViewModel(BaseDataclassRequest):
"""AWS cloud account update config
:link https://api-v2-docs.dome9.com/#schemadome9-web-api-models-cloudaccountpartialupdateviewmodel_dome9-web-api-models-cloudaccountregionconfigurationviewmodel_
:param cloud_account_id: The Dome9 cloud account id, at least one of the following properties must be provided: "cloudAccountId", "externalAccountNumber"
:type cloud_account_id: str
:param data:
:type data: CloudAccountRegionConfiguration
"""
cloud_account_id: str
data: CloudAccountRegionConfiguration
@dataclass
class CloudAccountUpdateOrganizationalUnitId(BaseDataclassRequest):
"""AWS cloud account update organizational unit id
:link https://api-v2-docs.dome9.com/#cloudaccounts_updateorganziationalid
:param organizational_unit_id: The Guid ID of the Organizational Unit to attach to. Use 'null' to attach to the root Organizational Unit
:type organizational_unit_id: str
"""
organizational_unit_id: str
@dataclass
class CloudAccountCredentialsViewModel(BaseDataclassRequest):
"""AWS cloud account update credentials
:link https://api-v2-docs.dome9.com/#schemadome9-web-api-models-cloudaccountpartialupdateviewmodel_dome9-web-api-models-cloudaccountcredentialsviewmodel_
:param cloud_account_id: The Dome9 cloud account id
:type cloud_account_id: str
:param data:
:type data: CloudAccountCredentials
"""
cloud_account_id: str
data: CloudAccountCredentials
class AwsCloudAccount(Dome9Resource):
def __init__(self, client: Client):
super().__init__(client)
def create(self, body: CloudAccount) -> Dict:
"""Create (onboard) aws cloud account
:link https://api-v2-docs.dome9.com/#cloudaccounts_post
:param body: Details for the new aws cloud account
:type body: CloudAccount
:returns https://api-v2-docs.dome9.com/#schemadome9-web-api-models-cloudaccountviewmodel
:rtype CloudAccount
"""
return self._post(route=AwsCloudAccountConsts.CLOUD_ACCOUNTS.value, body=body)
def get(self, aws_cloud_account_id: str = '') -> Union[Dict, List[Dict]]:
"""Fetch a specific AWS cloud account
:link https://api-v2-docs.dome9.com/#cloudaccounts_get
:param aws_cloud_account_id: Dome9 aws cloud account id
:type aws_cloud_account_id: str
:returns https://api-v2-docs.dome9.com/#schemadome9-web-api-models-cloudaccountviewmodel
:rtype CloudAccount
"""
route = f'{AwsCloudAccountConsts.CLOUD_ACCOUNTS.value}/{aws_cloud_account_id}'
return self._get(route=route)
def update_cloud_account_name(self, body: AwsCloudAccountUpdateName) -> Dict:
"""Update an AWS cloud account name
:link https://api-v2-docs.dome9.com/#cloudaccounts_updatecloudaccountname
:param body: Details for dome9 aws cloud account
:type body: AwsCloudAccountUpdateName
:returns https://api-v2-docs.dome9.com/#schemadome9-web-api-models-cloudaccountviewmodel
:rtype CloudAccount
"""
route = f'{AwsCloudAccountConsts.CLOUD_ACCOUNTS.value}/{AwsCloudAccountConsts.NAME.value}'
return self._put(route=route, body=body)
def update_region_config(self, body: CloudAccountRegionConfigurationViewModel) -> Dict:
"""Update an AWS cloud account region configuration
:link https://api-v2-docs.dome9.com/#cloudaccounts_updatecloudaccountregionconf
:param body: updated Regional Configuration parameters for the account
:type body: CloudAccountRegionConfigurationViewModel
:returns https://api-v2-docs.dome9.com/#schemadome9-web-api-models-cloudaccountviewmodel
:rtype CloudAccount
"""
route = f'{AwsCloudAccountConsts.CLOUD_ACCOUNTS.value}/{AwsCloudAccountConsts.REGION_CONFIG.value}'
return self._put(route=route, body=body)
def update_organizational_id(self, aws_cloud_account_id: str, body: CloudAccountUpdateOrganizationalUnitId) -> Dict:
"""Update the ID of the Organizational Unit that this cloud account will be attached to. Use 'null' for the root Organizational Unit
:link https://api-v2-docs.dome9.com/#cloudaccounts_updateorganziationalid
:param aws_cloud_account_id: The Dome9 Guid ID of the AWS cloud account
:type aws_cloud_account_id: str
:param body: The Guid ID of the Organizational Unit to attach to. Use 'null' to attach to the root Organizational Unit
:type body: CloudAccountUpdateOrganizationalUnitId
:returns https://api-v2-docs.dome9.com/#schemadome9-web-api-models-cloudaccountviewmodel
:rtype CloudAccount
"""
route = f'{AwsCloudAccountConsts.CLOUD_ACCOUNTS.value}/{aws_cloud_account_id}/{AwsCloudAccountConsts.ORGANIZATIONAL_UNIT.value}'
return self._put(route=route, body=body)
def update_credentials(self, body: CloudAccountCredentialsViewModel) -> Dict:
"""Update credentials for an AWS cloud account in Dome9. At least one of the following properties must be provided: "cloudAccountId", "externalAccountNumber"
:link https://api-v2-docs.dome9.com/#cloudaccounts_updatecloudaccountcredentials
:param body: credentials block
:type body: CloudAccountCredentialsViewModel
:returns https://api-v2-docs.dome9.com/#schemadome9-web-api-models-cloudaccountviewmodel
:rtype CloudAccount
"""
route = f'{AwsCloudAccountConsts.CLOUD_ACCOUNTS.value}/{AwsCloudAccountConsts.CREDENTIALS.value}'
return self._put(route=route, body=body)
def delete(self, aws_cloud_account_id: str):
"""Delete an AWS cloud account
:link https://api-v2-docs.dome9.com/#cloudaccounts_delete
:param aws_cloud_account_id: The Dome9 AWS account id (UUID)
:type aws_cloud_account_id: str
:returns: None
"""
route = f'{AwsCloudAccountConsts.CLOUD_ACCOUNTS.value}/{aws_cloud_account_id}'
return self._delete(route=route)
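# A minimal usage sketch; the Client constructor arguments and all credential
# values below are assumptions/placeholders, not taken from this module:
# client = Client(access_id='...', secret_key='...')
# aws = AwsCloudAccount(client)
# account = aws.create(CloudAccount(
#     name='my-aws-account',
#     credentials=CloudAccountCredentials(
#         arn='arn:aws:iam::123456789012:role/Dome9-Connect',  # placeholder ARN
#         secret='...',  # placeholder external ID
#     ),
# ))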
|
nilq/baby-python
|
python
|
from requests import get
import json
from dotenv import load_dotenv
import os
def get_public_ip():
ip = get('https://api.ipify.org').text
# print('My public IP address is: {}'.format(ip))
key = os.environ.get("api_key")
api_url = 'https://geo.ipify.org/api/v1?'
url = api_url + 'apiKey=' + key + '&ipAddress=' + ip
resp = get(url).text
resp_data = json.loads(resp)
# print(resp_data)
    def _lookup(*keys):
        """Walk nested keys in resp_data, returning "none" when any is missing."""
        value = resp_data
        try:
            for key in keys:
                value = value[key]
        except (KeyError, TypeError):
            return "none"
        return str(value)
    try:
        domains = ','.join(str(i) for i in resp_data['domains'])
    except KeyError:
        domains = "none"
    # Field order matters: consumers expect
    # ip, country, region, city, lat, lng, postalCode, timezone, geonameId,
    # domains, asn, name, route, domain, type, isp, proxy, vpn, tor
    data_list = [
        _lookup('ip'),
        _lookup('location', 'country'),
        _lookup('location', 'region'),
        _lookup('location', 'city'),
        _lookup('location', 'lat'),
        _lookup('location', 'lng'),
        _lookup('location', 'postalCode'),
        _lookup('location', 'timezone'),
        _lookup('location', 'geonameId'),
        domains,
        _lookup('as', 'asn'),
        _lookup('as', 'name'),
        _lookup('as', 'route'),
        _lookup('as', 'domain'),
        _lookup('as', 'type'),
        _lookup('isp'),
        _lookup('proxy', 'proxy'),
        _lookup('proxy', 'vpn'),
        _lookup('proxy', 'tor'),
    ]
    return data_list
if __name__ == "__main__":
load_dotenv()
data = get_public_ip()
print(data)
# ip_addr,country,region,city,lati,longi,postalcode,timezone,geonameId,domains,asn,name,route,domain,type,isp,proxy,vpn,tor
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.addons.wecom_api.api.wecom_abstract_api import ApiException
import logging
_logger = logging.getLogger(__name__)
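# Module-level cache of the last USER_GET response; _compute_user fills it and
# bind_wecom_member reads it (note: shared across concurrent wizard instances).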
RESPONSE = {}
class EmployeeBindWecom(models.TransientModel):
_name = "wecom.wizard.employee_bind_wecom"
_description = "Employees bind enterprise wechat members"
name = fields.Char(
string="Name", required=True, compute="_compute_user", store=True,
)
avatar = fields.Char(string="Avatar", compute="_compute_user", store=True,)
wecom_userid = fields.Char(string="Enterprise wechat user Id", required=True)
employee_id = fields.Many2one(
"hr.employee", string="Related Employee", required=True, readonly=True
)
employee_name = fields.Char(related="employee_id.name", readonly=True)
company_id = fields.Many2one(related="employee_id.company_id", readonly=True)
@api.depends("company_id", "wecom_userid")
def _compute_user(self):
for employee in self:
if employee.company_id and employee.wecom_userid:
company = employee.company_id
try:
wxapi = self.env["wecom.service_api"].InitServiceApi(
company.corpid, company.contacts_app_id.secret
)
response = wxapi.httpCall(
self.env["wecom.service_api_list"].get_server_api_call(
"USER_GET"
),
{"userid": employee.wecom_userid},
)
global RESPONSE
RESPONSE = response
employee.name = response["name"]
employee.avatar = response["thumb_avatar"]
except ApiException as ex:
return self.env["wecomapi.tools.action"].ApiExceptionDialog(
ex, raise_exception=True
)
else:
employee.name = None
employee.avatar = None
def bind_wecom_member(self):
# if self.name is None:
# raise UserError(
# _("There is no member with ID [%s] in enterprise wechat")
# % (self.wecom_userid)
# )
employee = (
self.env["hr.employee"]
.sudo()
.search(
[
("wecom_userid", "=", self.wecom_userid.lower()),
("is_wecom_user", "=", True),
("company_id", "=", self.company_id.id),
"|",
("active", "=", True),
("active", "=", False),
],
)
)
if len(employee) > 0:
raise UserError(
_("Employee with ID [%s] already exists") % (self.wecom_userid)
)
else:
self.employee_id.write(
{
"is_wecom_user": True,
"wecom_userid": RESPONSE["userid"],
"name": RESPONSE["name"],
"qr_code": RESPONSE["qr_code"],
}
)
if self.employee_id.user_id:
                # A user record is linked to this employee; keep it in sync
self.employee_id.user_id.write(
{
"is_wecom_user": True,
"wecom_userid": RESPONSE["userid"],
"name": RESPONSE["name"],
"notification_type": "inbox",
"qr_code": RESPONSE["qr_code"],
}
)
# self.employee_id._sync_user(
# self.env["res.users"].sudo().browse(self.employee_id.user_id),
# bool(self.employee_id.image_1920),
# )
|
nilq/baby-python
|
python
|
import struct
class SBox:
def __init__(self):
# define S-box
self.S = [
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
]
# create inverse look-up
self.S_inverse = [0] * 256
for i, x in enumerate(self.S):
self.S_inverse[x] = i
class AES:
def __init__(self, key):
if len(key) != 16:
raise Exception("Only 16-byte keys are supported")
self.rounds = 10
self.constants = [
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f,
0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4,
0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91
]
self.S = SBox()
        self.K = [
            [0] * 4 for i in range(self.rounds + 1)
        ]
        self.K_inverse = [
            [0] * 4 for i in range(self.rounds + 1)
        ]
        # Load the cipher key as the first round key (and last inverse round
        # key); the remaining round keys would come from key expansion.
        for i in range(4):
            x = struct.unpack('>i', key[4 * i: 4 * i + 4])[0]
            self.K[i // 4][i % 4] = x
            self.K_inverse[self.rounds - i // 4][i % 4] = x
    def _key_expansion(self):
        # Key expansion is not implemented here; only the initial round key
        # loaded above is populated.
        raise NotImplementedError("AES key expansion is not implemented")
if __name__ == '__main__':
    c = AES(b"0" * 16)  # keys must be bytes so struct.unpack can slice them
|
nilq/baby-python
|
python
|
from OpenGL.GL import glVertex3fv, glVertex2fv
class Vertex:
    def __init__(self, x, y, z=None):
self._x = x
self._y = y
self._z = z
def draw(self):
if self._z is None:
glVertex2fv((self._x,self._y))
else:
glVertex3fv((self._x,self._y,self._z))
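# A minimal usage sketch (assumes an active OpenGL context and an open
# glBegin/glEnd block, e.g. inside a GLUT/GLFW render callback):
# from OpenGL.GL import glBegin, glEnd, GL_TRIANGLES
# glBegin(GL_TRIANGLES)
# Vertex(0.0, 0.0, 0.0).draw()
# Vertex(1.0, 0.0, 0.0).draw()
# Vertex(0.0, 1.0).draw()  # z omitted -> falls back to glVertex2fv
# glEnd()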
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name="shipwreck",
version="0.0.1",
description="An experiment with using blob storage as my recordings storage!",
long_description=open("README.md", "r").read(),
long_description_content_type="text/markdown",
license="MIT License",
author="semick-dev",
author_email="sbeddall@live.com",
url="https://github.com/semick-dev/shipwreck",
classifiers=[
"Development Status :: 3 - Alpha",
],
packages=find_packages(exclude=["tests"]),
python_requires=">=3.6",
install_requires=["azure-storage-blob>=12.10.0"],
entry_points={"console_scripts": ["ship = ship:main"]},
)
|
nilq/baby-python
|
python
|
from datetime import date, datetime
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from tradeaccounts.models import Positions, TradeAccount, StockPositionSnapshot
from tradeaccounts.utils import calibrate_realtime_position
from users.models import User
class Command(BaseCommand):
help = 'Taking snapshot for investors stock position'
def handle(self, *args, **options):
investors = User.objects.filter(
is_active=True).exclude(is_superuser=True)
if investors is not None and len(investors):
for investor in investors:
                # Get all open (non-liquidated) positions for this investor,
                # pull the latest quotes, and update positions and trade records
                self.sync_stock_position_for_investor(
                    investor)
                # Refresh the trade account balance from the latest positions
                stock_positions = Positions.objects.filter(trader=investor).exclude(is_liquidated=True)
                for position in stock_positions:
                    # position.update_account_balance()
                    # Take the daily position snapshot
                    self.take_position_snapshot(position)
def sync_stock_position_for_investor(self, investor):
        '''
        Refresh each open position with the latest quote for its stock symbol
        '''
latest_positions = []
in_stock_positions = Positions.objects.select_for_update().filter(
trader=investor).exclude(is_liquidated=True,)
with transaction.atomic():
for entry in in_stock_positions:
calibrate_realtime_position(entry)
latest_positions.append(
{
'id': entry.pk,
'symbol': entry.stock_code,
'name': entry.stock_name,
'position_price': entry.position_price,
'realtime_price': entry.current_price,
'profit': entry.profit,
'profit_ratio': entry.profit_ratio,
'lots': entry.lots,
'target_position': entry.target_position,
'amount': entry.cash,
}
)
return latest_positions
def take_position_snapshot(self, position):
        today = date.today()
        # Only take one snapshot per position per day
        snapshots = StockPositionSnapshot.objects.filter(p_id=position.id, snap_date=today)
        if not snapshots.exists():
snapshot = StockPositionSnapshot()
snapshot.take_snapshot(position)
|
nilq/baby-python
|
python
|
# Created by MechAviv
# ID :: [130030103]
# Empress Road : Drill Hall
|
nilq/baby-python
|
python
|
from src.music_utils.PlaylistHandler import create_music_playlist
from src.network.OperationType import OperationType
from src.network.NetworkCommunication import *
all_music = create_music_playlist("All Songs")
socket = None
log = None
def init(sock, logger):
global socket
socket = sock
global log
log = logger
def do_req(req, address):
if req[0] == OperationType.ALL_SONGS.name:
_send_all_song_playlist()
return
elif req[0] == OperationType.DISCONNECT.name:
_disconnect(address)
return
elif req[0] == OperationType.REQ_SONG.name:
_send_song(req[1:])
return
else:
return
def _disconnect(address):
log.write("{} disconnecting".format(address))
socket.close()
def _send_all_song_playlist():
send_req(assemble_req(OperationType.ALL_SONGS.name, all_music.string()), socket, log)
def _send_song(song_index):
    i = int(song_index[0])
    # Send the raw file bytes; encode=False because the payload is binary
    with open(all_music.songs[i].file_name, "rb") as file:
        send_req(file.read(), socket, log, encode=False)
|
nilq/baby-python
|
python
|
# Copyright 2021 AI Redefined Inc. <dev+cogment@ai-r.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from types import SimpleNamespace
from typing import List
import cogment as _cog
import data_pb2
import subdir.otherdata_pb2
_plane_class = _cog.ActorClass(
id='plane',
config_type=None,
action_space=data_pb2.Human_PlaneAction,
observation_space=data_pb2.Observation,
)
_ai_drone_class = _cog.ActorClass(
id='ai_drone',
config_type=data_pb2.DroneConfig,
action_space=data_pb2.Ai_DroneAction,
observation_space=data_pb2.Observation,
)
actor_classes = _cog.actor_class.ActorClassList(
_plane_class,
_ai_drone_class,
)
trial = SimpleNamespace(
config_type=data_pb2.TrialConfig,
)
# Environment
environment = SimpleNamespace(
config_type=subdir.otherdata_pb2.Data,
)
class ActionsTable:
plane: List[data_pb2.Human_PlaneAction]
ai_drone: List[data_pb2.Ai_DroneAction]
def __init__(self, trial):
self.plane = [data_pb2.Human_PlaneAction()
for _ in range(trial.actor_counts[0])]
self.ai_drone = [data_pb2.Ai_DroneAction()
for _ in range(trial.actor_counts[1])]
def all_actions(self):
return self.plane + self.ai_drone
class plane_ObservationProxy(_cog.env_service.ObservationProxy):
@property
def snapshot(self) -> data_pb2.Observation:
return self._get_snapshot(data_pb2.Observation)
@snapshot.setter
def snapshot(self, v):
self._set_snapshot(v)
class ai_drone_ObservationProxy(_cog.env_service.ObservationProxy):
@property
def snapshot(self) -> data_pb2.Observation:
return self._get_snapshot(data_pb2.Observation)
@snapshot.setter
def snapshot(self, v):
self._set_snapshot(v)
class ObservationsTable:
plane: List[plane_ObservationProxy]
ai_drone: List[ai_drone_ObservationProxy]
def __init__(self, trial):
self.plane = [plane_ObservationProxy()
for _ in range(trial.actor_counts[0])]
self.ai_drone = [ai_drone_ObservationProxy()
for _ in range(trial.actor_counts[1])]
def all_observations(self):
return self.plane + self.ai_drone
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2021-03-22 21:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('activityinfo', '0173_auto_20210312_1957'),
]
operations = [
migrations.AddField(
model_name='indicator',
name='has_hpm_hac_2_note',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='indicator',
name='hpm_hac_2_label',
field=models.CharField(blank=True, max_length=5000, null=True, verbose_name=b'HPM HAC 2 Label'),
),
migrations.AlterField(
model_name='indicator',
name='hpm_additional_cumulative',
field=models.PositiveIntegerField(default=0, verbose_name=b'HPM Cumulative'),
),
migrations.AlterField(
model_name='indicator',
name='hpm_hac_2_additional_cumulative',
field=models.PositiveIntegerField(default=0, verbose_name=b'HPM HAC 2 Cumulative'),
),
migrations.AlterField(
model_name='indicator',
name='is_standalone_HAC_2',
field=models.BooleanField(default=False, verbose_name=b'Is Standalone HAC 2 HPM'),
),
]
|
nilq/baby-python
|
python
|
bl_info = {
"name": "keMouseAxisMove",
"author": "Kjell Emanuelsson",
"category": "Modeling",
"version": (1, 1, 4),
"blender": (2, 80, 0),
}
import bpy
from mathutils import Vector, Matrix
from bpy_extras.view3d_utils import region_2d_to_location_3d
from .ke_utils import getset_transform, restore_transform, average_vector
class VIEW3D_OT_ke_mouse_axis_move(bpy.types.Operator):
bl_idname = "view3d.ke_mouse_axis_move"
bl_label = "Mouse Axis Move"
bl_description = "Runs Grab with Axis auto-locked based on your mouse movement (or viewport when rot) using recalculated orientation " \
"based on the selected Orientation type."
bl_options = {'REGISTER', 'UNDO'}
mode: bpy.props.EnumProperty(
items=[("MOVE", "Move", "", 1),
("DUPE", "Duplicate", "", 2),
("ROT", "Rotate", "", 3),
("SCL", "Resize", "", 4),
("CURSOR", "Cursor", "", 5)
],
name="Mode",
default="MOVE")
mouse_pos = Vector((0, 0))
startpos = Vector((0, 0, 0))
tm = Matrix().to_3x3()
rv = None
ot = "GLOBAL"
obj = None
obj_loc = None
em_types = {'MESH', 'CURVE', 'SURFACE', 'META', 'FONT', 'HAIR', 'GPENCIL'}
em_normal_mode = False
@classmethod
def description(cls, context, properties):
if properties.mode == "DUPE":
return "Duplicates mesh/object before running Mouse Axis Move"
elif properties.mode == "CURSOR":
return "Mouse Axis Move for the Cursor. Global orientation or Cursor orientation (used in all modes except Global)"
else:
return "Runs Grab, Rotate or Resize with Axis auto-locked based on your mouse movement (or viewport when Rot) " \
"using recalculated orientation based on the selected Orientation type."
# @classmethod
# def poll(cls, context):
# return context.object is not None
@classmethod
def get_mpos(cls, context, coord, pos):
region = context.region
rv3d = context.region_data
return region_2d_to_location_3d(region, rv3d, coord, pos)
def invoke(self, context, event):
sel_obj = [o for o in context.selected_objects]
if sel_obj:
if len(sel_obj) > 1:
self.obj_loc = average_vector([o.location for o in sel_obj])
else:
self.obj_loc = sel_obj[0].location
else:
self.report({"INFO"}, " No objects selected ")
return {'CANCELLED'}
if sel_obj and context.object is None:
self.obj = sel_obj[0]
for o in sel_obj:
if o.type in self.em_types:
self.obj = o
break
elif context.object is not None:
self.obj = context.object
else:
self.report({"INFO"}, " No valid objects selected ")
return {'CANCELLED'}
# mouse track start
self.mouse_pos[0] = int(event.mouse_region_x)
self.mouse_pos[1] = int(event.mouse_region_y)
# Mouse vec start ( lazy edit mode overwrite later)
if self.mode != "ROT":
self.startpos = self.get_mpos(context, self.mouse_pos, self.obj_loc)
# get rotation vectors
og = getset_transform(setglobal=False)
self.ot = og[0]
if self.mode == "CURSOR":
if og[0] == "GLOBAL":
pass
else:
og[0] = "CURSOR"
self.tm = context.scene.cursor.matrix.to_3x3()
else:
# check type
if self.obj.type in self.em_types and bool(self.obj.data.is_editmode):
em = True
else:
em = "OBJECT"
if og[0] == "GLOBAL":
pass
elif og[0] == "CURSOR":
self.tm = context.scene.cursor.matrix.to_3x3()
elif og[0] == "LOCAL" or og[0] == "NORMAL" and not em:
self.tm = self.obj.matrix_world.to_3x3()
elif og[0] == "VIEW":
self.tm = context.space_data.region_3d.view_matrix.inverted().to_3x3()
elif og[0] == "GIMBAL":
self.report({"INFO"}, "Gimbal Orientation not supported")
return {'CANCELLED'}
# NORMAL / SELECTION
elif em != "OBJECT":
self.obj.update_from_editmode()
sel = [v for v in self.obj.data.vertices if v.select]
sel_co = average_vector([self.obj.matrix_world @ v.co for v in sel])
# Use selection for mouse start 2d pos instead of obj loc
self.startpos = self.get_mpos(context, self.mouse_pos, sel_co)
if sel:
try:
bpy.ops.transform.create_orientation(name='keTF', use_view=False, use=True, overwrite=True)
self.tm = context.scene.transform_orientation_slots[0].custom_orientation.matrix.copy()
bpy.ops.transform.delete_orientation()
restore_transform(og)
# if og[1] == "ACTIVE_ELEMENT":
self.em_normal_mode = True
except RuntimeError:
print("Fallback: Invalid selection for Orientation - Using Local")
                        # Normal orientation with an entire cube selected will fail create_orientation
bpy.ops.transform.select_orientation(orientation='LOCAL')
self.tm = self.obj.matrix_world.to_3x3()
else:
self.report({"INFO"}, " No elements selected ")
return {'CANCELLED'}
else:
self.report({"INFO"}, "Unsupported Orientation Mode")
return {'CANCELLED'}
if self.mode == "DUPE":
if em != "OBJECT":
bpy.ops.mesh.duplicate('INVOKE_DEFAULT')
else:
if bpy.context.scene.kekit.tt_linkdupe:
bpy.ops.object.duplicate('INVOKE_DEFAULT', linked=True)
else:
bpy.ops.object.duplicate('INVOKE_DEFAULT', linked=False)
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
def modal(self, context, event):
if event.type == 'MOUSEMOVE':
# mouse track end candidate
new_mouse_pos = Vector((int(event.mouse_region_x), int(event.mouse_region_y)))
t1 = abs(new_mouse_pos[0] - self.mouse_pos[0])
t2 = abs(new_mouse_pos[1] - self.mouse_pos[1])
if t1 > 10 or t2 > 10 or self.mode == "ROT":
if self.mode == "ROT":
# no need to track mouse vec
rm = context.space_data.region_3d.view_matrix
v = self.tm.inverted() @ Vector(rm[2]).to_3d()
x, y, z = abs(v[0]), abs(v[1]), abs(v[2])
else:
# mouse vec end
newpos = self.get_mpos(context, new_mouse_pos, self.obj_loc)
v = self.tm.inverted() @ Vector(self.startpos - newpos).normalized()
x, y, z = abs(v[0]), abs(v[1]), abs(v[2])
if x > y and x > z:
axis = True, False, False
oa = "X"
elif y > x and y > z:
axis = False, True, False
oa = "Y"
else:
axis = False, False, True
oa = "Z"
if self.mode == "ROT":
bpy.ops.transform.rotate('INVOKE_DEFAULT', orient_axis=oa, orient_type=self.ot,
orient_matrix=self.tm, orient_matrix_type=self.ot,
constraint_axis=axis, mirror=True, use_proportional_edit=False,
proportional_edit_falloff='SMOOTH', proportional_size=1,
use_proportional_connected=False, use_proportional_projected=False)
elif self.mode == "SCL":
bpy.ops.transform.resize('INVOKE_DEFAULT', orient_type=self.ot,
orient_matrix=self.tm, orient_matrix_type=self.ot,
constraint_axis=axis, mirror=True, use_proportional_edit=False,
proportional_edit_falloff='SMOOTH', proportional_size=1,
use_proportional_connected=False, use_proportional_projected=False)
elif self.mode == "CURSOR":
bpy.ops.transform.translate('INVOKE_DEFAULT', orient_type=self.ot, orient_matrix_type=self.ot,
constraint_axis=axis, mirror=True, use_proportional_edit=False,
proportional_edit_falloff='SMOOTH', cursor_transform=True,
use_proportional_connected=False, use_proportional_projected=False)
else:
if self.em_normal_mode:
axis = False, False, True
bpy.ops.transform.translate('INVOKE_DEFAULT', orient_type=self.ot, orient_matrix_type=self.ot,
constraint_axis=axis, mirror=True, use_proportional_edit=False,
proportional_edit_falloff='SMOOTH', proportional_size=1,
use_proportional_connected=False, use_proportional_projected=False)
return {'FINISHED'}
elif event.type == 'ESC':
            # Just in case
return {'CANCELLED'}
return {'RUNNING_MODAL'}
# -------------------------------------------------------------------------------------------------
# Class Registration & Unregistration
# -------------------------------------------------------------------------------------------------
def register():
bpy.utils.register_class(VIEW3D_OT_ke_mouse_axis_move)
def unregister():
bpy.utils.unregister_class(VIEW3D_OT_ke_mouse_axis_move)
if __name__ == "__main__":
register()
|
nilq/baby-python
|
python
|
# setup.py
import os, re
# get version info from module without importing it
version_re = re.compile("""__version__[\s]*=[\s]*['|"](.*)['|"]""")
with open('hello_world.py') as f:
content = f.read()
match = version_re.search(content)
version = match.group(1)
readme = os.path.join(os.path.dirname(__file__), 'README.md')
long_description = open(readme).read()
SETUP_ARGS = dict(
name = 'hello_world',
version = version,
    description = ('Grabs the "Hello World" Wikipedia page and prints its title'),
long_description = long_description,
url = 'github url to be provided',
author ='<AUTHOR>',
author_email ='<EMAIL>',
license = 'MIT',
include_package_data = True,
    classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.7',
    ],
py_modules = ['hello_world',],
install_requires = [
'requests >= 2.22',
],
)
if __name__ == '__main__':
from setuptools import setup, find_packages
SETUP_ARGS['packages'] = find_packages()
setup(**SETUP_ARGS)
|
nilq/baby-python
|
python
|
# python module to make interfacing with the cube simpler
import requests
import json
class Animation(object):
    def __init__(self):
        self.animation_type = "None"
    def to_json(self):
        # json.dumps quotes the value, so the output is valid JSON
        return json.dumps({"animation": self.animation_type})
class Blink(Animation):
def __init__(self, count=1, wait=100, red1=0, green1=0, blue1=255, red2=0, green2=0, blue2=0):
self.Count = count
self.Wait = wait
self.Red1 = red1
self.Green1 = green1
self.Blue1 = blue1
self.Red2 = red2
self.Green2 = green2
self.Blue2 = blue2
self.animation_type = "blink"
def to_json(self):
data = {
"animation": "blink",
"count": self.Count,
"wait": self.Wait,
"color": [
self.Red1,
self.Green1,
self.Blue1
],
"color2": [
self.Red2,
self.Green2,
self.Blue2
]
}
return json.dumps(data)
class Breathe(Animation):
def __init__(self, count=1, length=1000, red=0, green=0, blue=255):
self.Count = count
self.Length = length
self.Red = red
self.Green = green
self.Blue = blue
self.animation_type = "breathe"
def to_json(self):
data = {
"animation": "breathe",
"count": self.Count,
"length": self.Length,
"color": [
self.Red,
self.Green,
self.Blue
]
}
return json.dumps(data)
class Cube():
def __init__(self, url):
self.BASEURL = url
def get_color(self):
code, json = self.get('/color')
if code == 200: return json['red'], json['green'], json['blue']
return 0, 0, 0
def set_color(self, red, green, blue):
data = f'{{"red":{red}, "green":{green}, "blue":{blue}}}'
self.post('/color', data)
def animate(self, animation):
data = animation.to_json()
self.post('/animate', data)
def set_tap(self, animation):
data = animation.to_json()
self.post('/tap', data)
def get(self, path):
r = requests.get(self.BASEURL+path)
if r.text:
return r.status_code, r.json()
return r.status_code, ''
def post(self, path, data):
r = requests.post(self.BASEURL+path, data=data)
if r.text:
return r.status_code, r.json()
return r.status_code, ''
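# A minimal usage sketch; the device URL below is a placeholder:
# cube = Cube("http://192.168.1.50")
# cube.set_color(0, 255, 0)
# cube.animate(Blink(count=3, wait=200, red1=255))
# cube.set_tap(Breathe(count=2, length=1500))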
|
nilq/baby-python
|
python
|
"""Check the configuration for cspell.
See `cSpell
<https://github.com/streetsidesoftware/cspell/tree/master/packages/cspell>`_.
"""
import itertools
import json
import os
import textwrap
from configparser import ConfigParser
from pathlib import Path
from typing import Any, Iterable, List, Sequence, Union
import yaml
from repoma.errors import PrecommitError
from repoma.utilities import CONFIG_PATH, REPOMA_DIR, rename_file
from repoma.utilities.executor import Executor
from repoma.utilities.precommit import (
PrecommitConfig,
load_round_trip_precommit_config,
)
from repoma.utilities.readme import add_badge, remove_badge
from repoma.utilities.vscode import (
add_vscode_extension_recommendation,
remove_vscode_extension_recommendation,
)
__VSCODE_EXTENSION_NAME = "streetsidesoftware.code-spell-checker"
# cspell:ignore pelling
# pylint: disable=line-too-long
# fmt: off
__BADGE = (
"[](https://github.com/streetsidesoftware/cspell/tree/master/packages/cspell)"
)
# fmt: on
__BADGE_PATTERN = r"\[\!\[[Ss]pelling.*\]\(.*cspell.*\)\]\(.*cspell.*\)\n?"
__REPO_URL = "https://github.com/streetsidesoftware/cspell-cli"
with open(REPOMA_DIR / ".template" / CONFIG_PATH.cspell) as __STREAM:
__EXPECTED_CONFIG = json.load(__STREAM)
def main() -> None:
rename_file("cspell.json", str(CONFIG_PATH.cspell))
executor = Executor()
executor(_update_cspell_repo_url)
config = PrecommitConfig.load()
repo = config.find_repo(__REPO_URL)
if repo is None:
executor(_remove_configuration)
else:
executor(_check_check_hook_options)
executor(_fix_config_content)
executor(_sort_config_entries)
executor(_check_editor_config)
executor(_update_prettier_ignore)
executor(add_badge, __BADGE)
executor(add_vscode_extension_recommendation, __VSCODE_EXTENSION_NAME)
if executor.error_messages:
raise PrecommitError(executor.merge_messages())
def _update_cspell_repo_url(path: Path = CONFIG_PATH.precommit) -> None:
    old_url_patterns = [
        r".*/mirrors-cspell(.git)?$",
    ]
config = PrecommitConfig.load(path)
    for pattern in old_url_patterns:
repo_index = config.get_repo_index(pattern)
if repo_index is None:
continue
config_dict, yaml_parser = load_round_trip_precommit_config(path)
config_dict["repos"][repo_index]["repo"] = __REPO_URL
yaml_parser.dump(config_dict, path)
raise PrecommitError(
f"Updated cSpell pre-commit repo URL to {__REPO_URL} in {path}"
)
def _remove_configuration() -> None:
if CONFIG_PATH.cspell.exists():
os.remove(CONFIG_PATH.cspell)
raise PrecommitError(
f'"{CONFIG_PATH.cspell}" is no longer required'
" and has been removed"
)
if CONFIG_PATH.editor_config.exists():
with open(CONFIG_PATH.editor_config) as stream:
prettier_ignore_content = stream.readlines()
expected_line = str(CONFIG_PATH.cspell) + "\n"
if expected_line in set(prettier_ignore_content):
prettier_ignore_content.remove(expected_line)
with open(CONFIG_PATH.editor_config, "w") as stream:
stream.writelines(prettier_ignore_content)
raise PrecommitError(
f'"{CONFIG_PATH.cspell}" in {CONFIG_PATH.editor_config}'
" is no longer required and has been removed"
)
executor = Executor()
executor(remove_badge, __BADGE_PATTERN)
executor(remove_vscode_extension_recommendation, __VSCODE_EXTENSION_NAME)
if executor.error_messages:
raise PrecommitError(executor.merge_messages())
def _check_check_hook_options() -> None:
config = PrecommitConfig.load()
repo = config.find_repo(__REPO_URL)
if repo is None:
raise PrecommitError(
f"{CONFIG_PATH.precommit} is missing a repo: {__REPO_URL}"
)
expected_yaml = f"""
- repo: {__REPO_URL}
rev: ...
hooks:
- id: cspell
"""
repo_dict = repo.dict(skip_defaults=True)
expected_dict = yaml.safe_load(expected_yaml)[0]
if (
list(repo_dict) != list(expected_dict)
or [h.dict(skip_defaults=True) for h in repo.hooks]
!= expected_dict["hooks"]
):
raise PrecommitError(
"cSpell pre-commit hook should have the following form:\n"
+ expected_yaml
)
def _fix_config_content() -> None:
if not CONFIG_PATH.cspell.exists():
with open(CONFIG_PATH.cspell, "w") as stream:
stream.write("{}")
config = __get_config(CONFIG_PATH.cspell)
fixed_sections = []
for section_name in __EXPECTED_CONFIG:
if section_name in {"words", "ignoreWords"}:
if section_name not in config:
fixed_sections.append('"' + section_name + '"')
config[section_name] = []
continue
expected_section_content = __get_expected_content(config, section_name)
section_content = config.get(section_name)
if section_content == expected_section_content:
continue
fixed_sections.append('"' + section_name + '"')
config[section_name] = expected_section_content
if fixed_sections:
__write_config(config)
error_message = __express_list_of_sections(fixed_sections)
error_message += f" in {CONFIG_PATH.cspell} has been updated."
raise PrecommitError(error_message)
def _sort_config_entries() -> None:
config = __get_config(CONFIG_PATH.cspell)
error_message = ""
fixed_sections = []
for section, section_content in config.items():
if not isinstance(section_content, list):
continue
sorted_section_content = __sort_section(section_content)
if section_content == sorted_section_content:
continue
fixed_sections.append('"' + section + '"')
config[section] = sorted_section_content
if fixed_sections:
__write_config(config)
error_message = __express_list_of_sections(fixed_sections)
error_message += (
f" in {CONFIG_PATH.cspell} has been sorted alphabetically."
)
raise PrecommitError(error_message)
def _check_editor_config() -> None:
if not CONFIG_PATH.editor_config.exists():
return
cfg = ConfigParser()
with open(CONFIG_PATH.editor_config) as stream:
# https://stackoverflow.com/a/24501036/13219025
cfg.read_file(
itertools.chain(["[global]"], stream),
source=str(CONFIG_PATH.editor_config),
)
if not cfg.has_section(str(CONFIG_PATH.cspell)):
raise PrecommitError(
f"{CONFIG_PATH.editor_config} has no section"
f' "[{CONFIG_PATH.cspell}]"'
)
expected_options = {
"indent_size": "4",
}
options = dict(cfg.items(str(CONFIG_PATH.cspell)))
if options != expected_options:
error_message = (
f"{CONFIG_PATH.editor_config} should have the following"
" section:\n\n"
)
section_content = f"[{CONFIG_PATH.cspell}]\n"
for option, value in expected_options.items():
section_content += f"{option} = {value}\n"
section_content = textwrap.indent(section_content, prefix=" ")
raise PrecommitError(error_message + section_content)
def _update_prettier_ignore() -> None:
config = PrecommitConfig.load()
repo = config.find_repo(__REPO_URL)
if repo is None:
return
prettier_ignore_path = ".prettierignore"
expected_line = str(CONFIG_PATH.cspell) + "\n"
if not os.path.exists(prettier_ignore_path):
with open(prettier_ignore_path, "w") as stream:
stream.write(expected_line)
else:
with open(prettier_ignore_path) as stream:
prettier_ignore_content = stream.readlines()
if expected_line in set(prettier_ignore_content):
return
with open(prettier_ignore_path, "w+") as stream:
stream.write(expected_line)
raise PrecommitError(
f'Added "{CONFIG_PATH.cspell}" to {prettier_ignore_path}"'
)
def __get_expected_content(
config: dict, section: str, *, extend: bool = False
) -> Any:
if section not in config:
return __EXPECTED_CONFIG[section]
section_content = config[section]
if section not in __EXPECTED_CONFIG:
return section_content
expected_section_content = __EXPECTED_CONFIG[section]
if isinstance(expected_section_content, str):
return expected_section_content
if isinstance(expected_section_content, list):
if not extend:
return __sort_section(expected_section_content)
expected_section_content_set = set(expected_section_content)
expected_section_content_set.update(section_content)
return __sort_section(expected_section_content_set)
raise NotImplementedError(
"No implementation for section content of type"
f' {section_content.__class__.__name__} (section: "{section}"'
)
def __express_list_of_sections(sections: Sequence[str]) -> str:
"""Convert list of sections into natural language.
>>> __express_list_of_sections(["one"])
'Section one'
>>> __express_list_of_sections(["one", "two"])
'Sections one and two'
>>> __express_list_of_sections(["one", "two", "three"])
'Sections one, two, and three'
>>> __express_list_of_sections([])
''
"""
if not sections:
return ""
sentence = "Section"
if len(sections) == 1:
sentence += " " + sections[0]
else:
sentence += "s "
sentence += ", ".join(sections[:-1])
if len(sections) > 2:
sentence += ","
sentence += " and " + sections[-1]
return sentence
def __get_config(path: Union[str, Path]) -> dict:
with open(path) as stream:
return json.load(stream)
def __write_config(config: dict) -> None:
with open(CONFIG_PATH.cspell, "w") as stream:
json.dump(config, stream, indent=4, ensure_ascii=False)
stream.write("\n")
def __sort_section(content: Iterable[str]) -> List[str]:
"""Sort a list section.
>>> __sort_section({"one", "Two"})
['one', 'Two']
"""
return sorted(content, key=lambda s: s.lower())
|
nilq/baby-python
|
python
|
# Date: 01/27/2021
# Author: Borneo Cyber
|
nilq/baby-python
|
python
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
sys.path.append("..")
import unittest
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
from op_test_xpu import OpTest, XPUOpTest
import paddle
from paddle.fluid import Program, program_guard
class TestXPUClipByNormOp(XPUOpTest):
def setUp(self):
self.op_type = "clip_by_norm"
self.dtype = np.float32
self.use_xpu = True
self.max_relative_error = 0.006
self.initTestCase()
input = np.random.random(self.shape).astype("float32")
input[np.abs(input) < self.max_relative_error] = 0.5
self.inputs = {'X': input, }
self.attrs = {}
self.attrs['max_norm'] = self.max_norm
norm = np.sqrt(np.sum(np.square(input)))
if norm > self.max_norm:
output = self.max_norm * input / norm
else:
output = input
self.outputs = {'Out': output}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def initTestCase(self):
self.shape = (100, )
self.max_norm = 1.0
class TestCase1(TestXPUClipByNormOp):
def initTestCase(self):
self.shape = (100, )
self.max_norm = 1e20
class TestCase2(TestXPUClipByNormOp):
def initTestCase(self):
self.shape = (16, 16)
self.max_norm = 0.1
class TestCase3(TestXPUClipByNormOp):
def initTestCase(self):
self.shape = (4, 8, 16)
self.max_norm = 1.0
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
username = "YourInstagramUsername"
password = "YourInstagramPassword"
|
nilq/baby-python
|
python
|