index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
10,100 | 3161ee98445156af569967ccf26987c18df29669 | r"""The ``fourier`` module provides the core functionality of MPoL via :class:`mpol.fourier.FourierCube`."""
from __future__ import annotations
from typing import Any
import numpy as np
import torch
import torch.fft # to avoid conflicts with old torch.fft *function*
import torchkbnufft
from numpy import complexfloating, floating
from numpy.typing import NDArray
from torch import nn
from mpol.exceptions import DimensionMismatchError
from mpol.images import ImageCube
from mpol.protocols import MPoLModel
from . import utils
from .coordinates import GridCoords
class FourierCube(nn.Module):
    r"""
    This layer performs the FFT of an ImageCube and stores the corresponding dense FFT output as a cube. If you are using this layer in a forward-modeling RML workflow, because the FFT of the model is essentially stored as a grid, you will need to make the loss function calculation using a gridded loss function (e.g., :func:`mpol.losses.nll_gridded`) and a gridded dataset (e.g., :class:`mpol.datasets.GriddedDataset`).
    Args:
        coords (GridCoords): an object already instantiated from the GridCoords class.
        persistent_vis (Boolean): should the visibility cube be stored as part of the modules `state_dict`? If `True`, the state of the UV grid will be stored. It is recommended to use `False` for most applications, since the visibility cube will rarely be a direct parameter of the model.
    """
    def __init__(self, coords: GridCoords, persistent_vis: bool = False):
        super().__init__()
        # nchan is intentionally not stored here: forward() applies the FFT
        # channel-wise to whatever cube depth it receives, so only the grid
        # coordinates are needed (the _setup_coords convenience method that
        # other layers use is therefore skipped).
        self.coords = coords
        # Register ``vis`` as a buffer (not a Parameter): it is a derived
        # quantity recomputed on every forward() pass, but registering it lets
        # it follow the module across .to(device) moves. It is filled lazily
        # on the first forward() call (starts as None).
        self.register_buffer("vis", None, persistent=persistent_vis)
        self.vis: torch.Tensor
    @classmethod
    def from_image_properties(
        cls, cell_size: float, npix: int, persistent_vis: bool = False
    ) -> FourierCube:
        r"""Alternative method for instantiating a FourierCube from ``cell_size``
        and ``npix``
        Args:
            cell_size (float): the width of an image-plane pixel [arcseconds]
            npix (int): the number of pixels per image side
            persistent_vis (Boolean): should the visibility cube be stored as part of the modules `state_dict`? If `True`, the state of the UV grid will be stored. It is recommended to use `False` for most applications, since the visibility cube will rarely be a direct parameter of the model.
        """
        coords = GridCoords(cell_size, npix)
        return cls(coords, persistent_vis)
    def forward(self, cube: torch.Tensor) -> torch.Tensor:
        """
        Perform the FFT of the image cube on each channel.
        Args:
            cube (torch.double tensor, of shape ``(nchan, npix, npix)``): a prepacked image cube, for example, from ImageCube.forward()
        Returns:
            (torch.complex tensor, of shape ``(nchan, npix, npix)``): the FFT of the image cube, in packed format.
        """
        # make sure the cube is 3D
        assert cube.dim() == 3, "cube must be 3D"
        # the self.cell_size prefactor (in arcsec) is to obtain the correct output units
        # since it needs to correct for the spacing of the input grid.
        # See MPoL documentation and/or TMS Eqn A8.18 for more information.
        self.vis = self.coords.cell_size**2 * torch.fft.fftn(cube, dim=(1, 2))
        return self.vis
    @property
    def ground_vis(self) -> torch.Tensor:
        r"""
        The visibility cube in ground format cube fftshifted for plotting with ``imshow``.
        Returns:
            (torch.complex tensor, of shape ``(nchan, npix, npix)``): the FFT of the image cube, in sky plane format.
        """
        # defer the packed -> ground (fftshifted) conversion to the shared helper
        return utils.packed_cube_to_ground_cube(self.vis)
    @property
    def ground_amp(self) -> torch.Tensor:
        r"""
        The amplitude of the cube, arranged in unpacked format corresponding to the FFT of the sky_cube. Array dimensions for plotting given by ``self.coords.vis_ext``.
        Returns:
            torch.double : 3D amplitude cube of shape ``(nchan, npix, npix)``
        """
        return torch.abs(self.ground_vis)
    @property
    def ground_phase(self) -> torch.Tensor:
        r"""
        The phase of the cube, arranged in unpacked format corresponding to the FFT of the sky_cube. Array dimensions for plotting given by ``self.coords.vis_ext``.
        Returns:
            torch.double : 3D phase cube of shape ``(nchan, npix, npix)``
        """
        return torch.angle(self.ground_vis)
def safe_baseline_constant_meters(
    uu: NDArray[floating[Any]],
    vv: NDArray[floating[Any]],
    freqs: NDArray[floating[Any]],
    coords: GridCoords,
    uv_cell_frac: float = 0.05,
) -> bool:
    r"""
    Decide whether baselines supplied in meters may be treated as channel-independent once converted to kilolambda.
    Baselines measured in physical units (meters) are identical for every channel. Converting them to spatial frequency via
    .. math::
        u = \frac{D}{\lambda},
    makes them wavelength-dependent, so channels at different :math:`\lambda` sample slightly different :math:`u,v` points. This routine checks whether the largest channel-to-channel shift in :math:`u` or :math:`v` (in kilolambda) stays below a threshold expressed as a fraction of one :math:`u,v` cell of ``coords``.
    A return value of ``True`` means it is safe to parallelize the :class:`mpol.fourier.NuFFT` layer over the coil dimension.
    Args:
        uu (1D np.array): a 1D array of length ``nvis`` array of the u (East-West) spatial frequency coordinate in units of [m]
        vv (1D np.array): a 1D array of length ``nvis`` array of the v (North-South) spatial frequency coordinate in units of [m]
        freqs (1D np.array): a 1D array of length ``nchan`` of the channel frequencies, in units of [Hz].
        coords: a :class:`mpol.coordinates.GridCoords` object which represents the image and uv-grid dimensions.
        uv_cell_frac (float): the maximum threshold for a change in :math:`u` or :math:`v` spatial frequency across channels, measured as a fraction of the :math:`u,v` cell defined by ``coords``.
    Returns:
        boolean: `True` if it is safe to assume that the baselines are constant with channel (at a tolerance of ``uv_cell_frac``.) Otherwise returns `False`.
    """
    # broadcast to (nchan, nvis) and convert from meters to kilolambda
    uu_lam, vv_lam = utils.broadcast_and_convert_baselines(uu, vv, freqs)
    # absolute tolerance corresponding to the requested cell fraction [klambda]
    threshold = uv_cell_frac * coords.du
    # stack u and v so both are checked with a single pass: (2, nchan, nvis)
    stacked = np.array([uu_lam, vv_lam])
    # per-visibility spread across the channel axis
    spread = stacked.max(axis=1) - stacked.min(axis=1)
    # the worst-case spread decides the verdict
    return spread.max() < threshold
def safe_baseline_constant_kilolambda(
    uu: NDArray[floating[Any]],
    vv: NDArray[floating[Any]],
    coords: GridCoords,
    uv_cell_frac: float = 0.05,
) -> bool:
    r"""
    This routine determines whether the baselines can safely be assumed to be constant with channel, when they are represented in units of kilolambda.
    Compared to :class:`mpol.fourier.safe_baseline_constant_meters`, this function works with multidimensional arrays of ``uu`` and ``vv`` that are shape (nchan, nvis) and have units of kilolambda.
    If this routine returns True, then it should be safe for the user to either average the baselines across channel or simply choose a single, representative channel. This would enable parallelization in the {class}`mpol.fourier.NuFFT` via the coil dimension.
    Args:
        uu (2D np.array): a (``nchan``, ``nvis``) array of the u (East-West) spatial frequency coordinate in units of [klambda]
        vv (2D np.array): a (``nchan``, ``nvis``) array of the v (North-South) spatial frequency coordinate in units of [klambda]
        coords: a :class:`mpol.coordinates.GridCoords` object which represents the image and uv-grid dimensions.
        uv_cell_frac (float): the maximum threshold for a change in :math:`u` or :math:`v` spatial frequency across channels, measured as a fraction of the :math:`u,v` cell defined by ``coords``.
    Returns:
        boolean: `True` if it is safe to assume that the baselines are constant with channel (at a tolerance of ``uv_cell_frac``.) Otherwise returns `False`.
    """
    # convert the fractional tolerance into an absolute threshold [klambda]
    delta_uv = uv_cell_frac * coords.du  # [klambda]
    # stack u and v into one (2, nchan, nvis) array so both are checked in
    # a single pass
    uv = np.array([uu, vv])
    # peak-to-peak spread across the channel axis for every visibility point;
    # the worst case decides the verdict
    max_diff: float = np.ptp(uv, axis=1).max()
    # compare to uv_cell_frac
    return max_diff < delta_uv
class NuFFT(nn.Module):
    r"""
    This layer translates input from an :class:`mpol.images.ImageCube` directly to loose, ungridded samples of the Fourier plane, directly corresponding to the :math:`u,v` locations of the data. This layer is different than a :class:`mpol.Fourier.FourierCube` in that, rather than producing the dense cube-like output from an FFT routine, it utilizes the non-uniform FFT or 'NuFFT' to interpolate directly to discrete :math:`u,v` locations that need not correspond to grid cell centers. This is implemented using the KbNufft routines of the `TorchKbNufft <https://torchkbnufft.readthedocs.io/en/stable/index.html>`_ package.
    **Dimensionality**: One consideration when using this layer is the dimensionality of your image and your visibility samples. If your image has multiple channels (``nchan > 1``), there is the possibility that the :math:`u,v` sample locations corresponding to each channel may be different. In ALMA/VLA applications, this can arise when continuum observations are taken over significant bandwidth, since the spatial frequency sampled by any pair of antennas is wavelength-dependent
    .. math::
        u = \frac{D}{\lambda},
    where :math:`D` is the projected baseline (measured in, say, meters) and :math:`\lambda` is the observing wavelength. In this application, the image-plane model could be the same for each channel, or it may vary with channel (necessary if the spectral slope of the source is significant).
    On the other hand, with spectral line observations it will usually be the case that the total bandwidth of the observations is small enough such that the :math:`u,v` sample locations could be considered as the same for each channel. In spectral line applications, the image-plane model usually varies substantially with each channel.
    This layer will determine whether the spatial frequencies are treated as constant based upon the dimensionality of the ``uu`` and ``vv`` input arguments.
    * If ``uu`` and ``vv`` have a shape of (``nvis``), then it will be assumed that the spatial frequencies can be treated as constant with channel (and will invoke parallelization across the image cube ``nchan`` dimension using the 'coil' dimension of the TorchKbNufft package).
    * If the ``uu`` and ``vv`` have a shape of (``nchan, nvis``), then it will be assumed that the spatial frequencies are different for each channel, and the spatial frequencies provided for each channel will be used (and will invoke parallelization across the image cube ``nchan`` dimension using the 'batch' dimension of the TorchKbNufft package).
    Note that there is no straightforward, computationally efficient way to proceed if there are a different number of spatial frequencies for each channel. The best approach is likely to construct ``uu`` and ``vv`` arrays that have a shape of (``nchan, nvis``), such that all channels are padded with bogus :math:`u,v` points to have the same length ``nvis``, and you create a boolean mask to keep track of which points are valid. Then, when this routine returns data points of shape (``nchan, nvis``), you can use that boolean mask to select only the valid :math:`u,v` points points.
    **Interpolation mode**: You may choose the type of interpolation mode that KbNufft uses under the hood by changing the boolean value of ``sparse_matrices``. For repeated evaluations of this layer (as might exist within an optimization loop), ``sparse_matrices=True`` is likely to be the more accurate and faster choice. If ``sparse_matrices=False``, this routine will use the default table-based interpolation of TorchKbNufft. Note that as of TorchKbNuFFT version 1.4.0, sparse matrices are not yet available when parallelizing using the 'batch' dimension --- this will result in a warning.
    Args:
        cell_size (float): the width of an image-plane pixel [arcseconds]
        npix (int): the number of pixels per image side
        coords (GridCoords): an object already instantiated from the GridCoords class. If providing this, cannot provide ``cell_size`` or ``npix``.
        nchan (int): the number of channels in the :class:`mpol.images.ImageCube`. Default = 1.
        uu (np.array): a length ``nvis`` array (not including Hermitian pairs) of the u (East-West) spatial frequency coordinate [klambda]
        vv (np.array): a length ``nvis`` array (not including Hermitian pairs) of the v (North-South) spatial frequency coordinate [klambda]
    """
    def __init__(
        self,
        coords: GridCoords,
        uu: NDArray[floating[Any]],
        vv: NDArray[floating[Any]],
        nchan: int = 1,
        sparse_matrices: bool = True,
    ):
        super().__init__()
        # 1D uu/vv means every channel shares the same baselines (coil
        # parallelization); 2D (nchan, nvis) means per-channel baselines
        # (batch parallelization). Sparse-matrix interpolation is only
        # supported in the former case, so downgrade to table interpolation
        # with a warning otherwise.
        if not (same_uv := uu.ndim == 1 and vv.ndim == 1) and sparse_matrices:
            import warnings
            warnings.warn(
                "Provided uu and vv arrays are multi-dimensional, suggesting an "
                "intent to parallelize using the 'batch' dimension. This feature "
                "is not yet available in TorchKbNuFFT v1.4.0 with sparse matrix "
                "interpolation (sparse_matrices=True), therefore we are proceeding "
                "with table interpolation (sparse_matrices=False).",
                category=RuntimeWarning,
            )
            sparse_matrices = False
        # NOTE(review): self.interp_mat appears to be an unused leftover; the
        # sparse path below stores real_interp_mat/imag_interp_mat buffers
        # instead -- confirm before removing.
        self.interp_mat = None
        self.coords = coords
        self.nchan = nchan
        self.sparse_matrices = sparse_matrices
        self.same_uv = same_uv
        # initialize the non-uniform FFT object
        self.nufft_ob = torchkbnufft.KbNufft(
            im_size=(self.coords.npix, self.coords.npix)
        )
        # register as a buffer so the k-trajectory follows the module across
        # .to(device) moves
        self.register_buffer("k_traj", self._assemble_ktraj(uu, vv))
        self.k_traj: torch.Tensor
        if self.sparse_matrices:
            # precompute the sparse interpolation matrices
            real_interp_mat, imag_interp_mat = torchkbnufft.calc_tensor_spmatrix(
                self.k_traj, im_size=(self.coords.npix, self.coords.npix)
            )
            self.register_buffer("real_interp_mat", real_interp_mat)
            self.register_buffer("imag_interp_mat", imag_interp_mat)
            self.real_interp_mat: torch.Tensor
            self.imag_interp_mat: torch.Tensor
    @classmethod
    def from_image_properties(
        cls,
        cell_size: float,
        npix: int,
        uu: NDArray[floating[Any]],
        vv: NDArray[floating[Any]],
        nchan: int = 1,
        sparse_matrices: bool = True,
    ) -> NuFFT:
        r"""Alternative constructor: build the :class:`GridCoords` from
        ``cell_size`` [arcseconds] and ``npix`` before instantiating the layer.
        All other arguments are forwarded unchanged to ``__init__``."""
        coords = GridCoords(cell_size, npix)
        return cls(coords, uu, vv, nchan, sparse_matrices)
    def _klambda_to_radpix(
        self, klambda: float | NDArray[floating[Any]]
    ) -> float | NDArray[floating[Any]]:
        r"""Convert a spatial frequency in units of klambda to 'radians/sky pixel,' using the pixel cell_size provided by ``self.coords.dl``.
        These concepts can be a little confusing because there are two angular measures at play.
        1. The first is the normal angular sky coordinate, normally measured in arcseconds for typical sources observed by ALMA or the VLA. Arcseconds, being an angular coordinate, can equivalently be expressed in units of radians. To avoid confusion, we will call this angular measurement 'sky radians.' Alternatively, for a given image grid, this same sky coordinate could be expressed in units of sky pixels.
        2. The second is the spatial frequency of some image-plane function, :math:`I_\nu(l,m)`, which we could quote in units of 'cycles per arcsecond' or 'cycles per sky pixel,' for example. With a radio interferometer, spatial frequencies are typically quoted in units of the observing wavelength, i.e., lambda or kilo-lambda. If the field of view of the image is small, thanks to the small-angle approximation, units of lambda are directly equivalent to 'cycles per sky radian.' The second angular measure comes about when converting the spatial frequency from a linear measure of frequency 'cycles per sky radian' to an angular measure of frequency 'radians per sky radian' or 'radians per sky pixel.'
        The TorchKbNufft package expects k-trajectory vectors in units of 'radians per sky pixel.' This routine helps convert spatial frequencies from their default unit (kilolambda) into 'radians per sky pixel' using the pixel cell_size as provided by ``self.coords.dl``.
        Args:
            klambda (float): spatial frequency in units of kilolambda
        Returns:
            float: spatial frequency measured in units of radian per sky pixel
        """
        # convert from kilolambda to cycles per sky radian
        u_lam = klambda * 1e3  # [lambda, or cycles/radian]
        # convert from 'cycles per sky radian' to 'radians per sky radian'
        u_rad_per_rad = u_lam * 2 * np.pi  # [radians / sky radian]
        # size of pixel in radians
        # self.coords.dl  # [sky radians/pixel]
        # convert from 'radians per sky radian' to 'radians per sky pixel'
        u_rad_per_pix = u_rad_per_rad * self.coords.dl  # [radians / pixel]
        return u_rad_per_pix
    def _assemble_ktraj(
        self, uu: NDArray[floating[Any]], vv: NDArray[floating[Any]]
    ) -> torch.Tensor:
        r"""
        This routine converts a series of :math:`u, v` coordinates into a k-trajectory vector for the torchkbnufft routines. The dimensionality of the k-trajectory vector will influence how TorchKbNufft will perform the operations.
        * If ``uu`` and ``vv`` have a 1D shape of (``nvis``), then it will be assumed that the spatial frequencies can be treated as constant with channel. This will result in a ``k_traj`` vector that has shape (``2, nvis``), such that parallelization will be across the image cube ``nchan`` dimension using the 'coil' dimension of the TorchKbNufft package.
        * If the ``uu`` and ``vv`` have a 2D shape of (``nchan, nvis``), then it will be assumed that the spatial frequencies are different for each channel, and the spatial frequencies provided for each channel will be used. This will result in a ``k_traj`` vector that has shape (``nchan, 2, nvis``), such that parallelization will be across the image cube ``nchan`` dimension using the 'batch' dimension of the TorchKbNufft package.
        Args:
            uu (1D or 2D numpy array): u (East-West) spatial frequency coordinate [klambda]
            vv (1D or 2D numpy array): v (North-South) spatial frequency coordinate [klambda]
        Returns:
            k_traj (torch tensor): a k-trajectory vector with shape (``2, nvis``) for 1D input or (``nchan, 2, nvis``) for 2D input
        """
        uu_radpix = self._klambda_to_radpix(uu)
        vv_radpix = self._klambda_to_radpix(vv)
        assert isinstance(uu_radpix, np.ndarray)
        assert isinstance(vv_radpix, np.ndarray)
        # if uu and vv are 1D dimension, then we can assume that we will parallelize across the coil dimension.
        # otherwise, we assume that we will parallelize across the batch dimension.
        if self.same_uv:
            # k-trajectory needs to be packed the way the image is packed (y,x), so
            # the trajectory needs to be packed (v, u)
            # if TorchKbNufft receives a k-traj tensor of shape (2, nvis), it will parallelize across the coil dimension, assuming
            # that the k-traj is the same for all coils/channels.
            # interim convert to numpy array because of torch warning about speed
            k_traj = torch.tensor(np.array([vv_radpix, uu_radpix]))
            return k_traj
        # in this case, we are given two tensors of shape (nchan, nvis)
        # first, augment each tensor individually to create a (nbatch, 1, nvis) tensor
        # then, concatenate the tensors along the axis=1 dimension.
        if uu_radpix.shape[0] != self.nchan:
            raise DimensionMismatchError(
                f"nchan of uu ({uu_radpix.shape[0]}) is more than one but different than that used to initialize the NuFFT layer ({self.nchan})"
            )
        if vv_radpix.shape[0] != self.nchan:
            raise DimensionMismatchError(
                f"nchan of vv ({vv_radpix.shape[0]}) is more than one but different than that used to initialize the NuFFT layer ({self.nchan})"
            )
        uu_radpix_aug = torch.unsqueeze(torch.tensor(uu_radpix), 1)
        vv_radpix_aug = torch.unsqueeze(torch.tensor(vv_radpix), 1)
        # pack (v, u) along dim=1 to match the (y, x) image packing above
        k_traj = torch.cat([vv_radpix_aug, uu_radpix_aug], dim=1)
        # if TorchKbNufft receives a k-traj tensor of shape (nbatch, 2, nvis), it will parallelize across the batch dimension
        return k_traj
    def forward(self, cube: torch.Tensor) -> torch.Tensor:
        r"""
        Perform the FFT of the image cube for each channel and interpolate to the ``uu`` and ``vv`` points set at layer initialization. This call should automatically take the best parallelization option as indicated by the shape of the ``uu`` and ``vv`` points.
        Args:
            cube (torch.double tensor): of shape ``(nchan, npix, npix)``). The cube should be a "prepacked" image cube, for example, from :meth:`mpol.images.ImageCube.forward`
        Returns:
            torch.complex tensor: of shape ``(nchan, nvis)``, Fourier samples evaluated corresponding to the ``uu``, ``vv`` points set at initialization.
        """
        # make sure that the nchan assumptions for the ImageCube and the NuFFT
        # setup are the same
        if cube.size(0) != self.nchan:
            raise DimensionMismatchError(
                f"nchan of ImageCube ({cube.size(0)}) is different than that used to initialize NuFFT layer ({self.nchan})"
            )
        # "unpack" the cube, but leave it flipped
        # NuFFT routine expects a "normal" cube, not an fftshifted one
        shifted = torch.fft.fftshift(cube, dim=(1, 2))
        # convert the cube to a complex type, since this is required by TorchKbNufft
        complexed = shifted.type(torch.complex128)
        # Consider how the similarity of the spatial frequency samples should be
        # treated. We already took care of this on the k_traj side, since we set
        # the shapes. But this also needs to be taken care of on the image side.
        # * If we plan to parallelize with the coil dimension, then we need an
        #   image with shape (1, nchan, npix, npix).
        # * If we plan to parallelize using the batch dimension, then we need
        #   an image with shape (nchan, 1, npix, npix).
        if self.same_uv:
            # we want to unsqueeze/squeeze at dim=0 to parallelize over the coil
            # dimension
            # unsquezee shape: [1, nchan, npix, npix]
            altered_dimension = 0
        else:
            # we want to unsqueeze/squeeze at dim=1 to parallelize over the
            # batch dimension
            # unsquezee shape: [nchan, 1, npix, npix]
            altered_dimension = 1
        expanded = complexed.unsqueeze(altered_dimension)
        # torchkbnufft uses a [nbatch, ncoil, npix, npix] scheme
        # the cell_size**2 prefactor applies the same unit correction as in
        # FourierCube.forward (see TMS Eqn A8.18)
        output: torch.Tensor = self.coords.cell_size**2 * self.nufft_ob(
            expanded,
            self.k_traj,
            interp_mats=(
                (self.real_interp_mat, self.imag_interp_mat)
                if self.sparse_matrices
                else None
            ),
        )
        # squeezed shape: [nchan, npix, npix]
        output = torch.squeeze(output, dim=altered_dimension)
        return output
def make_fake_data(
    image_cube: ImageCube,
    uu: NDArray[floating[Any]],
    vv: NDArray[floating[Any]],
    weight: NDArray[floating[Any]],
) -> tuple[NDArray[complexfloating[Any, Any]], ...]:
    r"""
    Generate a mock visibility dataset from a supplied :class:`mpol.images.ImageCube`. See :ref:`mock-dataset-label` for details on preparing a generic image for use in an :class:`~mpol.images.ImageCube`.
    The supplied baselines may be 1D (single continuum channel) or 2D (image cube); 1D inputs are promoted to 2D arrays of shape ``(1, nvis)``.
    Args:
        imageCube (:class:`~mpol.images.ImageCube`): the image layer to put into a fake dataset
        uu (numpy array): array of u spatial frequency coordinates, not including Hermitian pairs. Units of [:math:`\mathrm{k}\lambda`]
        vv (numpy array): array of v spatial frequency coordinates, not including Hermitian pairs. Units of [:math:`\mathrm{k}\lambda`]
        weight (2d numpy array): length array of thermal weights :math:`w_i = 1/\sigma_i^2`. Units of [:math:`1/\mathrm{Jy}^2`]
    Returns:
        (2-tuple): the mock dataset with noise added, followed by the noiseless mock dataset.
    """
    # a NuFFT layer matched to the ImageCube; accepts 1D (nvis,) baselines too
    nufft = NuFFT(coords=image_cube.coords, nchan=image_cube.nchan, uu=uu, vv=vv)
    # promote single-channel inputs so downstream arrays are always (nchan, nvis)
    if uu.ndim == 1:
        uu = np.atleast_2d(uu)
        vv = np.atleast_2d(vv)
        weight = np.atleast_2d(weight)
    # evaluate the noiseless model visibilities at the data (u, v) points
    vis_noiseless: NDArray[complexfloating[Any, Any]]
    vis_noiseless = nufft(image_cube()).detach().numpy()
    # per-visibility thermal noise level from the weights
    sigma = 1 / np.sqrt(weight)
    # independent Gaussian draws for the real and imaginary components
    real_noise = np.random.normal(loc=0, scale=sigma, size=uu.shape)
    imag_noise = np.random.normal(loc=0, scale=sigma, size=uu.shape)
    vis_noise = vis_noiseless + (real_noise + 1.0j * imag_noise)
    return vis_noise, vis_noiseless
def get_vis_residuals(
    model: MPoLModel,
    u_true: NDArray[floating[Any]],
    v_true: NDArray[floating[Any]],
    V_true: NDArray[complexfloating[Any, Any]],
    channel: int = 0,
) -> NDArray[complexfloating[Any, Any]]:
    r"""
    Compute residuals between a gridded `model` and loose (ungridded) data
    visibilities at the data (u, v) coordinates, using `mpol.fourier.NuFFT`.
    Parameters
    ----------
    model : `torch.nn.Module` object
        Instance of the `mpol.precomposed.SimpleNet` class. Contains model
        visibilities.
    u_true, v_true : array, unit=[k\lambda]
        Data u- and v-coordinates
    V_true : array, unit=[Jy]
        Data visibility amplitudes
    channel : int, default=0
        Channel (of `model`) to use to calculate residual visibilities
    Returns
    -------
    vis_resid : array of complex
        Model loose residual visibility amplitudes of the form
        Re(V) + 1j * Im(V)
    """
    # sample the model image at the loose data (u, v) points
    nufft = NuFFT(coords=model.coords, nchan=model.nchan, uu=u_true, vv=v_true)
    # detach from the autograd graph and keep only the requested channel
    predicted = nufft(model.icube()).detach().numpy()[channel]
    vis_resid: NDArray[complexfloating[Any, Any]] = V_true - predicted
    return vis_resid
|
10,101 | dc8030ce2a8c415209ecc19ca9926055c46a12b8 | # longest_palindrome2.py
class Solution:
    def longestPalindrome(self, s):
        """Return the length of the longest palindrome buildable from ``s``.

        Every pair of equal characters contributes 2 to the length; if any
        character occurs an odd number of times, one leftover character may
        occupy the center, contributing 1 more.

        The original implementation ended with
        ``result if result > 0 else (1 if mark > 0 else 0)``, whose else-branch
        is unreachable (whenever ``mark > 0``, ``result`` is already >= 1);
        this version removes that dead logic while preserving behavior.

        Args:
            s (str): input string (comparison is case-sensitive).

        Returns:
            int: maximum palindrome length; 0 for an empty string.
        """
        from collections import Counter

        counts = Counter(s)
        # each complete pair contributes two characters
        pairs = sum(count // 2 for count in counts.values())
        # any odd count leaves one character usable as the palindrome's center
        has_center = any(count % 2 for count in counts.values())
        return 2 * pairs + (1 if has_center else 0)
if __name__ == "__main__":
    # smoke test on a known example: "abccccdd" -> 7 (e.g., "dccaccd")
    s = "abccccdd"
    sol = Solution()
    print(s)
    print(sol.longestPalindrome(s))
|
10,102 | 6528abc78922a64f4353a2d7541e4a0938d601de | """Test antisymmetric_projection."""
import numpy as np
from toqito.perms import antisymmetric_projection
def test_antisymmetric_projection_d_2_p_1():
    """Dimension is 2 and p is equal to 1."""
    # with p=1 the antisymmetric projection is just the identity
    computed = antisymmetric_projection(2, 1).todense()
    identity_mat = np.array([[1, 0], [0, 1]])
    entries_close = np.isclose(computed, identity_mat)
    np.testing.assert_equal(np.all(entries_close), True)
def test_antisymmetric_projection_p_larger_than_d():
    """The `p` value is greater than the dimension `d`."""
    # p > d leaves no antisymmetric subspace: the projection is all zeros
    computed = antisymmetric_projection(2, 3).todense()
    zeros_8x8 = np.zeros((8, 8))
    entries_close = np.isclose(computed, zeros_8x8)
    np.testing.assert_equal(np.all(entries_close), True)
def test_antisymmetric_projection_2():
    """The dimension is 2."""
    computed = antisymmetric_projection(2).todense()
    # projector onto the singlet-like antisymmetric subspace of C^2 x C^2
    reference = np.array([[0, 0, 0, 0], [0, 0.5, -0.5, 0], [0, -0.5, 0.5, 0], [0, 0, 0, 0]])
    entries_close = np.isclose(computed, reference)
    np.testing.assert_equal(np.all(entries_close), True)
def test_antisymmetric_projection_3_3_true():
    """The `dim` is 3, the `p` is 3, and `partial` is True."""
    # spot-check a single known entry of the partial isometry
    computed = antisymmetric_projection(3, 3, True).todense()
    entry_close = np.isclose(computed[5].item(), -0.40824829)
    np.testing.assert_equal(entry_close, True)
if __name__ == "__main__":
    # NOTE(review): np.testing.run_module_suite was deprecated and has been
    # removed in recent NumPy releases -- presumably this file predates that;
    # confirm and invoke pytest directly if running under a modern NumPy.
    np.testing.run_module_suite()
|
10,103 | 1fe5f2f76999c7484acfae831e2a66337f041601 | # Credit: http://realiseyourdreams.wordpress.com/latex-scripts/
#Script to clean up a directory tree!
import os,subprocess
#browse the directory
for dirname, dirnames, filenames in os.walk('.'):
for filename in filenames:
#Check every file for:
filepath = os.path.join(dirname, filename)
#hidden file
if filename.startswith('.'):
print 'remove hidden file ' + filepath
os.remove(filepath)
#backup file
elif filename.endswith('~'):
print 'remove backup file ' + filepath
os.remove(filepath)
#log file
elif filename.endswith('.log'):
print 'remove log file ' + filepath
os.remove(filepath)
elif filename.endswith('.fls') or filename.endswith('.fls') or filename.endswith('.bbl') or filename.endswith('.blg') or filename.endswith('.pdf') or filename.endswith('.aux'):
print 'remove log file ' + filepath
os.remove(filepath) |
10,104 | 17575318372e09039c8d94ed803192e24859cb05 | import subprocess
import shlex
import time
import os
import signal
PROCESS = []
while True:
    # Prompt (Russian): "Choose an action: q - quit, s - start the server and
    # clients, x - close all windows". The original file carried this text
    # with broken (mojibake) encoding; restored here as proper UTF-8 --
    # TODO confirm against the project's other prompts.
    ANSWER = input('Выберите действие: q - выход, s - запустить сервер и клиенты, x - закрыть все окна: ')
    if ANSWER == 'q':
        break
    elif ANSWER == 's':
        # launch one server and three test clients, each in its own terminal
        PROCESS.append(subprocess.Popen('gnome-terminal -- python3 server.py', shell=True))
        # give the server a moment to start before the clients connect
        time.sleep(0.5)
        PROCESS.append(subprocess.Popen('gnome-terminal -- python3 client.py -n Test_1', shell=True))
        PROCESS.append(subprocess.Popen('gnome-terminal -- python3 client.py -n Test_2', shell=True))
        PROCESS.append(subprocess.Popen('gnome-terminal -- python3 client.py -n Test_3', shell=True))
    elif ANSWER == 'x':
        while PROCESS:
            VICTIM = PROCESS.pop()
            VICTIM.kill()
            # no-op after kill(); kept for parity with the original behavior
            VICTIM.terminate()
10,105 | 133acc36f9c3b54ed808a209054f5efcddbb04ae | # Generated by Django 2.2.7 on 2020-02-03 13:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``answer_order`` display-order field to ``fillquestion``."""
    # depends on the app's initial schema migration
    dependencies = [
        ('fillintheblanks', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='fillquestion',
            name='answer_order',
            field=models.CharField(blank=True, choices=[('content', 'Content'), ('none', 'None')], help_text='The order in which multichoice answer options are displayed to the user', max_length=30, null=True, verbose_name='Answer Order'),
        ),
    ]
|
10,106 | d90d6cd69a792ba03697df51cb5224467c74deeb | from rest_framework import serializers
from .models import *
from django.contrib.auth import authenticate
from django.contrib.auth.models import Group
from rest_framework.response import Response
import json
from django.db import models
class LoginSerializer(serializers.ModelSerializer):
    """Validate a login attempt against Django's authentication backend.

    ``email`` and ``password`` are redeclared as plain CharFields so that
    model-level validators (e.g., email uniqueness) do not run on login.
    """
    email = serializers.CharField()
    password = serializers.CharField()
    class Meta:
        model = AppUser
        fields = ['email', 'password']
    def validate(self, data):
        """Return the authenticated user, or raise a field-level error.

        Note: deliberately returns the ``AppUser`` instance instead of the
        validated-data dict, so views read the user from
        ``serializer.validated_data``.
        """
        user = authenticate(**data)
        if user:
            return user
        raise serializers.ValidationError({'email': "You have entered incorrect credentials, please check"})
class PayloadSerializer(serializers.ModelSerializer):
    """Serialize the minimal user fields embedded in the auth payload."""
    class Meta:
        model = AppUser
        fields = ['id', 'email', 'role', 'first_name', 'last_name']
class AllUsersSerializer(serializers.ModelSerializer):
    """Serialize the compact user representation used for user listings."""
    class Meta:
        model = AppUser
        fields = ['id', 'email', 'role']
class RegisterStudentSerializer(serializers.ModelSerializer):
    """Create a new ``student`` user and attach it to the ``student`` group."""
    class Meta:
        model = AppUser
        # never echo the password back in API responses
        fields = ['first_name', 'last_name', 'email', 'password', 'department_name']
        extra_kwargs = {"password": {"write_only": True}}
    def create(self, validated_data):
        """Create the user via ``create_user`` (hashes the password) and group it."""
        user = AppUser.objects.create_user(
            first_name=validated_data['first_name'],
            last_name=validated_data['last_name'],
            email=validated_data['email'],
            password=validated_data['password'],
            # department is currently fixed; the submitted value is ignored
            department_name='Computer Science and Information Systems',
            role='student'
        )
        group, created = Group.objects.get_or_create(name='student')
        user.groups.add(group)
        return user
class RegisterFacultySerializers(serializers.ModelSerializer):
    """Create a new ``faculty`` user and attach it to the ``faculty`` group."""
    class Meta:
        model = AppUser
        fields = ['first_name', 'last_name', 'email', 'password', 'department_name']
        # keep the password out of serialized responses
        extra_kwargs = {"password": {"write_only": True}}
    def create(self, validated_data):
        """Create the user via ``create_user`` (hashes the password) and group it."""
        new_user = AppUser.objects.create_user(
            email=validated_data['email'],
            password=validated_data['password'],
            first_name=validated_data['first_name'],
            last_name=validated_data['last_name'],
            department_name='Computer Science and Information Systems',
            role='faculty'
        )
        faculty_group, _created = Group.objects.get_or_create(name='faculty')
        new_user.groups.add(faculty_group)
        return new_user
class RegisterHodSerializers(serializers.ModelSerializer):
    """Create a new ``hod`` (head-of-department) user and attach it to the ``hod`` group."""
    class Meta:
        model = AppUser
        fields = ['first_name', 'last_name', 'email', 'password', 'department_name']
        # keep the password out of serialized responses
        extra_kwargs = {"password": {"write_only": True}}
    def create(self, validated_data):
        """Create the user via ``create_user`` (hashes the password) and group it."""
        new_user = AppUser.objects.create_user(
            first_name=validated_data['first_name'],
            last_name=validated_data['last_name'],
            email=validated_data['email'],
            password=validated_data['password'],
            department_name='Computer Science and Information Systems',
            role='hod'
        )
        hod_group, _created = Group.objects.get_or_create(name='hod')
        new_user.groups.add(hod_group)
        return new_user
class RegisterAdminSerializers(serializers.ModelSerializer):
    """Registers an admin account (superuser) in the 'admin' group."""

    class Meta:
        model = AppUser
        fields = ['first_name', 'last_name', 'email', 'password', 'department_name']
        extra_kwargs = {"password": {"write_only": True}}

    def create(self, validated_data):
        # Admins are created as Django superusers.
        admin_user = AppUser.objects.create_user(
            email=validated_data['email'],
            password=validated_data['password'],
            first_name=validated_data['first_name'],
            last_name=validated_data['last_name'],
            department_name='Computer Science and Information Systems',
            role='admin',
            is_superuser=True,
        )
        admin_group, _ = Group.objects.get_or_create(name='admin')
        admin_user.groups.add(admin_group)
        return admin_user
|
10,107 | 5e901074031de4e02fa68fa42c3ff780e14d8110 | import os
import spotipy
from spotipy.oauth2 import SpotifyOAuth
import pyautogui
import cv2
from datetime import datetime
from utils import *
class SpotifyControls:
    """
    Executes Spotify Web API commands based on the hand pose predicted
    from HandPoses: each recognized pose maps to one spotipy call
    (play/pause, next/previous track, seek, volume, device cycling,
    saving the current track, and marking a position to jump back to).

    Authentication uses the SpotifyOAuth flow with a per-user token
    cache under /tmp; the cache file and account are selected by the
    USERNAME environment variable.
    """

    def __init__(self):
        # Track position/URI remembered by the 'mark_pos' gesture.
        self.marked_pos = None
        self.marked_uri = 'empty'
        # State for the relative-volume gesture: last index-finger y and
        # the time it was sampled (stale references expire after 2.5 s).
        self.prev_index_finger_tip_y = None
        self.prev_vol_datetime = None
        self.username = os.environ['USERNAME']
        # Scopes: read playback state, modify playback, modify library.
        self.scope = "user-read-playback-state,user-modify-playback-state,user-library-modify"
        self.sp_client = spotipy.Spotify(
            client_credentials_manager=SpotifyOAuth(
                scope=self.scope,
                cache_path='/tmp/.cache-'+self.username,
                username=self.username,
            )
        )

    def execute_cmd(self, pose, lm, delay, frame):
        """
        Execute Movement Method

        This method executes the spotipy call matching a predicted pose.

        Arguments:
            pose {string}: predicted hand pose
            lm {string}: hands landmarks detected by HandDetect
            delay {Delay}: class responsible to provoke delays on the execution frames
            frame {cv2 Image, np.ndarray}: webcam frame
        """
        if pose == 'pause_or_play':
            try:
                playback = self.sp_client.current_playback()
                if playback is None or not playback['is_playing']:
                    self.sp_client.start_playback()
                else:
                    self.sp_client.pause_playback()
            except spotipy.exceptions.SpotifyException as e:
                # No active device: try transferring playback to the
                # first known device before giving up.
                devs = self.sp_client.devices()['devices']
                if len(devs) > 0:
                    dev_id = devs[0]['id']
                    self.sp_client.transfer_playback(dev_id)
                else:
                    # BUG FIX: message previously said "turn the volume
                    # up" (copy/paste from the volume branch).
                    print("Tried to play or pause...")
                    print("Sorry, user needs to log into a device with Spotify!")
            delay.reset_counter(20)
            delay.set_in_action(True)
        elif pose == 'connect_cycle':
            try:
                cur_dev_id = self.sp_client.current_playback()['device']['id']
                devs = self.sp_client.devices()['devices']
                cur_dev_idx = None
                for i, dev in enumerate(devs):
                    if dev['id'] == cur_dev_id:
                        cur_dev_idx = i
                # ROBUSTNESS FIX: if the active device is not in the
                # list, fall back to index 0 instead of crashing on
                # None - 1 (TypeError was not caught below).
                if cur_dev_idx is None:
                    cur_dev_idx = 0
                new_dev_idx = cur_dev_idx - 1  # Loop backwards (wraps to last)
                new_dev_id = devs[new_dev_idx]['id']
                self.sp_client.transfer_playback(new_dev_id)
            except spotipy.exceptions.SpotifyException as e:
                print("Tried to change device to connect_speaker (left)...")
                print(e)
            delay.reset_counter(20)
            delay.set_in_action(True)
        elif pose == 'next_track':
            try:
                self.sp_client.next_track()
            except spotipy.exceptions.SpotifyException as e:
                print("Tried to go to next track...")
                print(e)
            delay.reset_counter(10)
            delay.set_in_action(True)
        elif pose == 'previous_track':
            try:
                playback = self.sp_client.current_playback()
                if playback is not None:
                    cur_uri = playback['item']['uri']
                    cur_pos = playback['progress_ms']
                    # Check if we have a valid mark in this track to skip back to
                    if self.marked_pos is not None and self.marked_pos < cur_pos \
                            and cur_uri == self.marked_uri:
                        self.sp_client.seek_track(self.marked_pos)
                    else:
                        if cur_pos < 6*1000:  # Near the start: go to previous track
                            # BUG FIX: previous_track() takes no position
                            # argument (only device_id); marked_pos was
                            # being passed here by mistake.
                            self.sp_client.previous_track()
                        else:  # Go back to beginning of track
                            self.sp_client.seek_track(0)
            except spotipy.exceptions.SpotifyException as e:
                print("Tried to go to previous track...")
                print(e)
            delay.reset_counter(10)
            delay.set_in_action(True)
        elif pose == 'volume_slider':
            try:
                playback = self.sp_client.current_playback()
                if playback is not None:
                    # Only adjust volume relative to a recent reference
                    # sample; otherwise (re)establish the reference.
                    if self.prev_index_finger_tip_y is not None \
                            and self.prev_vol_datetime is not None \
                            and (datetime.now() - self.prev_vol_datetime).total_seconds() < 2.5:
                        cur_vol = playback['device']['volume_percent']
                        # Landmark 8 is the index fingertip; [8*3+1] is its y.
                        cur_index_finger_tip_y = lm[8*3+1]
                        vol_diff = int((self.prev_index_finger_tip_y - cur_index_finger_tip_y)*200)
                        new_vol = max(0, min(100, cur_vol + vol_diff))
                        self.sp_client.volume(new_vol)
                        self.prev_index_finger_tip_y = lm[8*3+1]
                        self.prev_vol_datetime = datetime.now()
                    else:
                        self.prev_index_finger_tip_y = lm[8*3+1]
                        self.prev_vol_datetime = datetime.now()
                else:
                    print("No active playback device... start playing Spotify somewhere.")
            except spotipy.exceptions.SpotifyException as e:
                print("Tried to set volume...")
                print(e)
            delay.reset_counter()
            delay.set_in_action(True)
        # E.g. 'skipback_2' or 'skipfwd_5'
        elif pose[:9] == 'skipback_' or pose[:8] == 'skipfwd_':
            # Last pose character is the magnitude; sign from direction.
            n = int(pose[-1]) * (-1 if pose[:9] == 'skipback_' else 1)
            try:
                playback = self.sp_client.current_playback()
                if playback is not None:
                    # ~3 seconds per unit, clamped at the track start.
                    new_pos = max(playback['progress_ms']+int((3*n + 0.3)*1000), 0)
                    self.sp_client.seek_track(new_pos)
                else:
                    print("No active playback device... start playing Spotify somewhere.")
            except spotipy.exceptions.SpotifyException as e:
                print("Tried to skipback...")
                print(e)
            # NOTE(review): angle_now is not set anywhere else in this
            # class -- presumably consumed by the pose detector; confirm.
            self.angle_now = None
            delay.reset_counter()
            delay.set_in_action(True)
        elif pose == 'like':
            try:
                playback = self.sp_client.current_playback()
                if playback is not None and playback['is_playing']:
                    # BUG FIX: previously passed playback['progress_ms']
                    # (a position in ms) as the track id; save the id of
                    # the currently playing item instead.
                    track_id = playback['item']['id']
                    self.sp_client.current_user_saved_tracks_add(tracks=[track_id])
            except spotipy.exceptions.SpotifyException as e:
                print("Tried to like a song...")
                print(e)
            delay.reset_counter(20)
            delay.set_in_action(True)
        elif pose == 'mark_pos':
            try:
                playback = self.sp_client.current_playback()
                if playback is not None:
                    cur_uri = playback['item']['uri']
                    # First mark (or mark on a different track): set it.
                    if self.marked_uri == 'empty' or self.marked_uri != cur_uri:
                        self.marked_pos = playback['progress_ms']
                        self.marked_uri = playback['item']['uri']
                        print(f"DEBUG: Position {self.marked_pos} marked.")
                    else:  # Delete old mark
                        print(f"DEBUG: Position {self.marked_pos} deleted.")
                        self.marked_pos = None
                        self.marked_uri = 'empty'
                else:
                    print("No active playback device... start playing Spotify somewhere.")
            except spotipy.exceptions.SpotifyException as e:
                print("Tried to mark_pos...")
                print(e)
            delay.reset_counter(20)  # Ignore a few more frames than usual to avoid undoing
            delay.set_in_action(True)
|
10,108 | ce051e802eb45ba3bd5fcf4a1389eb9baf6a0408 | from inv.models import Item
from dogma.models import TypeAttributes
from dogma.const import *
def getCPUandPGandSlot(itemsIter):
    """Adds extra information to the given list of items.

    Returns:
        A list of tuples in the form (item, cpu, pg, classes).
        - cpu and pg are the unbonused values of the CPU and PG attributes
          for that item (None when either lookup fails).
        - classes is a space delimited list of CSS classes representing the
          slot and hardpoint type of the item.

    Args:
        itemsIter: An iterable over the items.
    """
    # Slot / hardpoint constants mapped to their CSS class names.
    slot_css = {
        Item.HIGH: "high",
        Item.MED: "med",
        Item.LOW: "low",
        Item.RIG: "rig",
        Item.SUB: "sub",
    }
    hardpoint_css = {
        Item.MISSILE: "missile",
        Item.TURRET: "turret",
    }
    annotated = []
    for item in itemsIter:
        cpu = pg = None
        try:
            cpu = TypeAttributes.objects.get(typeID=item, attributeID=ATTRIBUTE_CPU)
            pg = TypeAttributes.objects.get(typeID=item, attributeID=ATTRIBUTE_PG)
        except TypeAttributes.DoesNotExist:
            # Leave missing attributes as None (pg stays None if only
            # the second lookup fails).
            pass
        css = []
        if item.slot in slot_css:
            css.append(slot_css[item.slot])
        if item.hardpoint in hardpoint_css:
            css.append(hardpoint_css[item.hardpoint])
        annotated.append((item, cpu, pg, " ".join(css)))
    return annotated
|
10,109 | 36e6f0b965827ee85ff79c8d591e8307e8b1a590 | import psycopg2
# Open database connection (local postgres database; Python 2 script).
db = psycopg2.connect(database = "postgres")
###################------------3g------------####################
#####Question 3g##### What percent of students transfer into one of the ABC majors?
cursor3g1 = db.cursor()
# Count students currently enrolled in each ABC major.
cursor3g1.execute("SELECT COUNT(SID) FROM Student WHERE CUR_MAJOR = %s", ['ABC1'])
totalCurABC1 = cursor3g1.fetchone()
cursor3g1.execute("SELECT COUNT(SID) FROM Student WHERE CUR_MAJOR = %s", ['ABC2'])
totalCurABC2 = cursor3g1.fetchone()
# Count students transferred from another major to ABC1: a non-empty
# PRE_ABC_MAJOR marks a transfer student.
cursor3g1.execute("SELECT COUNT(SID) FROM Student WHERE CUR_MAJOR = %s AND PRE_ABC_MAJOR <> %s", ['ABC1', ''])
transToABC1 = cursor3g1.fetchone()
# Same count for students transferred into ABC2.
cursor3g1.execute("SELECT COUNT(SID) FROM Student WHERE CUR_MAJOR = %s AND PRE_ABC_MAJOR <> %s", ['ABC2', ''])
transToABC2 = cursor3g1.fetchone()
# Show results: transfers-in divided by current enrollment across both majors.
print '*****3g***** The percent of students transfer into one of the ABC majors is %.8f' % ((float(transToABC1[0]) + float(transToABC2[0]))/(float(totalCurABC1[0])+float(totalCurABC2[0])))
cursor3g1.close()
#####Question 3g##### Top 5 majors that students transfer from into ABC and their percentage
cursor3g2 = db.cursor()
# Get the distinct source majors that transferred into ABC.
cursor3g2.execute("SELECT DISTINCT PRE_ABC_MAJOR FROM Student WHERE PRE_ABC_MAJOR <> %s",[''])
preABC = cursor3g2.fetchall()
cursor3g2.close()
# Total number of students transferred into ABC (accumulated below).
sumTrans = 0
cursor3g3 = db.cursor()
# For each source major, count its transfers and append the count to the
# row tuple, turning preABC into a list of (major, count) tuples.
for index in range(len(preABC)):
    # NOTE(review): preABC[index] is a 1-tuple row from fetchall(); it is
    # passed as the single %s parameter here -- presumably psycopg2
    # adapts it to the bare string value; confirm against the driver.
    cursor3g3.execute("SELECT COUNT(SID) FROM Student WHERE PRE_ABC_MAJOR = %s",[preABC[index]])
    cntTrans = cursor3g3.fetchone()
    sumTrans = sumTrans + cntTrans[0]
    # Tuple concatenation: (major,) + (count,) -> (major, count).
    preABC[index] = preABC[index] + cntTrans
# Sort the (major, count) pairs by count, descending.
preABC = sorted(preABC, key = lambda x:x[1], reverse = True)
print '*****3g***** The top 5 majors that students transfer from into ABC and their percent are:'
# Print only the top five entries.
index = 0
for x in preABC:
    if(index>4):break
    index = index+1
    print '%d - Major: %s, precent: %.8f' % (index, x[0], float(x[1])/float(sumTrans))
cursor3g3.close()
# Disconnect from server.
db.close()
|
10,110 | 6b9ca358d4a42aac3fb7f208435efecc22dab6c7 | """
This class is the most granular section of the Pabuito Grade Tool.
It takes a subcategory from the xml and generates all GUI components for grading.
It includes all class attributes required to retrieve and load grading attributes [comments, values, etc]
"""
import maya.cmds as cmds
# import maya.utils
import xml.etree.ElementTree as et
import re, sys
class SubcategoryGradeSection(object):
def __init__(self, subcategoryFromXML, defaultsFromXML, updateFunction, runAuto):
"""
take the element tree element 'subcategory' from the xml to generate the subcategory section'
"""
self.updateFunction = updateFunction
self.runAuto = runAuto
subcategory_width = 200
scrollField_height = 100
row_spacing = 0
self.current_grade_value = 0
self.current_comment_text = ''
self.current_default_comment_text = ''
self.current_example_comment_text = ''
self.auto_flagged_list = []
self.is_complete = False
self.subcatXML = subcategoryFromXML
self.log('trying to unpack gradeValue from defaults')
self.log('defaultsFromXML are: %s' % defaultsFromXML)
self.grade_values = defaultsFromXML.find('gradeValue')
self.log('grade_values: %s' % self.grade_values)
self.title = self.subcatXML.get('title')
if self.title == None:
self.title = self.subcatXML.find('title').text
self.weight = self.subcatXML.get('weight')
if self.weight == None:
self.weight = self.subcatXML.find('weight').text
# self.title = self.subcatXML.find('title').text
# self.log('section title: %s' % self.title)
# self.weight = self.subcatXML.find('weight').text
# self.log('section weight: %s' % self.weight)
try:
self.auto = self.subcatXML.find('auto').text
except AttributeError:
self.auto = ''
self.rmb = []
if self.subcatXML.findall('RMB'):
for item in self.subcatXML.findall('RMB'):
self.rmb.append([item.get('title'), item.text])
self.log('\nRiGHT HERE!')
if self.rmb != []:
for item in self.rmb:
self.log('\nTitle: {}\n{}'.format(item[0], item[1]))
self.log('RMB: {}'.format(self.rmb))
self.log('starting subcategory GUI')
self.subcat_main_column_layout = cmds.columnLayout(columnWidth = subcategory_width, rowSpacing = row_spacing)#sub cat main column columnLayout
self.titleText = cmds.text(label = self.title, align = 'left')
if self.auto != '':
cmds.popupMenu(parent = self.titleText, button = 3)
cmds.menuItem(label = 'Run Auto', command = lambda *args: self.runAuto(self.subcatXML, self, auto = True))
cmds.menuItem(label = 'Select Flagged', command = lambda *args: self.select_flagged())
self.int_field_slider_row_layout = cmds.rowLayout(numberOfColumns = 2)#int_field_slider_row_layout
self.grade_intField = cmds.intField( minValue=0, maxValue=150, step=1 , width = subcategory_width/6, changeCommand = lambda *args: self.update_subcategory('intField', *args))
self.grade_slider = cmds.intSlider( min=-100, max=0, value=0, step=1, width = subcategory_width*5/6, changeCommand = lambda *args: self.update_subcategory('slider', *args), dragCommand = lambda *args: self.update_subcategory('slider', *args))
cmds.setParent('..')
self.radio_creator(self.subcatXML.find('gradeComment'))
self.log('radios created, starting comment frames')
self.subcat_comments_frame_layout = cmds.frameLayout( label='Comments', collapsable = True, collapse = True, width = subcategory_width)
self.comments_text_field = cmds.scrollField(width = subcategory_width, height = scrollField_height, wordWrap = True, changeCommand = lambda *args: self.update_subcategory('comments_text', *args))
self.rmb_menu = cmds.popupMenu(parent = self.comments_text_field, button = 3)
# i = 0
if self.rmb != []:
for item in self.rmb:
self.log('{}:{}'.format(item[0], item[1]))
cmds.menuItem(label = item[0], command = lambda args, i = item[1]:self.add_comment_to_comments(i))
# i += 1
cmds.menuItem(label = 'Append session comment', command = lambda *args: self.append_session_commment())
cmds.button(label = 'Add Selected to Examples', width = subcategory_width, command = lambda *args: self.add_selected_to_examples(*args))
self.example_frameLayout = cmds.frameLayout( label='Example Objects', collapsable = True, collapse = True, width = subcategory_width)
self.example_comments = cmds.scrollField(width = subcategory_width, height = scrollField_height, wordWrap = True, changeCommand = lambda *args: self.update_subcategory('example_comments_text', *args))
cmds.setParent('..')
self.default_comments_frameLayout = cmds.frameLayout( label='Default Comments', collapsable = True, collapse = True, width = subcategory_width)
self.default_comments = cmds.scrollField(width = subcategory_width, height = scrollField_height, wordWrap = True, changeCommand = lambda *args: self.update_subcategory('default_comments_text', *args))
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
def select_flagged(self):
self.log('select flagged!')
if len(self.auto_flagged_list) == 0:
cmds.warning('No objects in flagged list')
else:
self.log('selecting objects')
cmds.select(self.auto_flagged_list)
self.log('objects selected')
def radio_creator(self, gradeComments):
"""
take the gradeComments element from the xml and create radio buttons labeled correctly
"""
labels = []
# self.log('gradeComments are: %s' % gradeComments)
for label in gradeComments:
labels.append(label)
# self.log('appending label: %s' % label)
# self.log('labels: %s' % labels)
cmds.rowLayout(numberOfColumns = len(labels)+1)
self.grade_radio_collection = cmds.radioCollection()
for label in labels:
self.log('processing label: {}'.format(label.tag))
tag = re.sub('plus', '+', label.tag)
self.log('processed label: {}\n'.format(tag))
cmds.radioButton(label = tag, changeCommand = lambda *args: self.update_subcategory('radioButton', *args), width = 30)
##
##
self.resetRadioButton = cmds.radioButton(label = '.', visible = False)
##
##
cmds.setParent('..')
cmds.setParent('..')
def gutCheck_update(self, intValue, *args):
cmds.intField(self.grade_intField, edit = True, value = intValue)
self.update_subcategory('intField')
def update_subcategory(self, control_type, *args):
"""
trigger on element change command to update all the other fields in subcategory
"""
if control_type is 'intField':
self.log('query intField and update others')
intField_value = cmds.intField(self.grade_intField, query = True, value = True)
self.log('intField is %s' % intField_value)
self.current_grade_value = intField_value
self.log('current grade is: %s' % self.current_grade_value)
cmds.intSlider(self.grade_slider, edit=True, value = -intField_value)
self.update_radios_default_comments(intField_value)
self.update_default_comments()
self.update_is_complete()
self.updateFunction()
elif control_type is 'slider':
self.log('query slider and update others')
slider_value = abs(cmds.intSlider(self.grade_slider, query = True, value = True))
self.log('intSlider is %s' % slider_value)
self.current_grade_value = slider_value
self.log('current grade is: %s' % self.current_grade_value)
cmds.intField(self.grade_intField, edit = True, value = slider_value)
self.update_radios_default_comments(slider_value)
self.update_default_comments()
self.update_is_complete()
self.updateFunction()
elif control_type is 'radioButton':
self.log('query radio collection and update others')
selected = cmds.radioCollection(self.grade_radio_collection, query = True, select = True)
selected_letter = cmds.radioButton(selected, query = True, label = True)
selected_letter = re.sub('\\+', 'plus', selected_letter)
self.log('selected radioButton: %s' % selected_letter)
self.current_grade_value = int(self.grade_values.find(selected_letter).text)
self.log('current grade is: %s' % self.current_grade_value)
cmds.intField(self.grade_intField, edit = True, value = self.current_grade_value)
cmds.intSlider(self.grade_slider, edit = True, value = -self.current_grade_value)
self.log('selected_letter: %s' % selected_letter)
cmds.scrollField(self.default_comments, edit = True, text = self.subcatXML.find('gradeComment').find(selected_letter).text)
self.current_default_comment_text = cmds.scrollField(self.default_comments, query = True, text = True)
self.log('Default Comments Updated')
self.log(self.current_default_comment_text)
self.update_is_complete()
self.updateFunction()
elif control_type is 'default_comments_text':
self.current_default_comment_text = cmds.scrollField(self.default_comments, query = True, text = True)
self.log('Default Comments Updated')
self.log(self.current_default_comment_text)
self.update_is_complete()
elif control_type is 'example_comments_text':
self.current_example_comment_text = cmds.scrollField(self.example_comments, query = True, text = True)
self.log('examples updated')
self.log(self.current_example_comment_text)
else:
self.current_comment_text = cmds.scrollField(self.comments_text_field, query = True, text = True)
self.log('comments updated')
self.log(self.current_comment_text)
def update_radios_default_comments(self, value):
"""
take value and set radio buttons associated with that buttons
"""
self.log('set dim radios')
grade_value_letter = ""
do_break = False
for g_value in self.grade_values:
for g_comment in self.subcatXML.find('gradeComment'):
if g_value.tag == g_comment.tag:
grade_value_letter = g_value.tag
if int(g_value.text) <= value:
do_break = True
break
if do_break:
break
grade_value_letter = re.sub('plus', '+', grade_value_letter)
radioButtons = cmds.radioCollection(self.grade_radio_collection, query = True, collectionItemArray = True)
# print('grade_value_letter: {}'.format(grade_value_letter))
for butn in radioButtons:
# print('radio button to test: {}'.format(cmds.radioButton(butn, query=True, label = True)))
if cmds.radioButton(butn, query=True, label = True) == grade_value_letter:
# print('they match... should have selected it...?')
cmds.radioButton(butn, edit = True, select = True)
def update_default_comments(self):
"""
query grade values and update default comments accordingly
SETS BASED ON RADIO BUTTONS. RADIO BUTTONS MUST BE UPDATED FIRST
"""
self.log('update dim default comments')
selected_letter = ''
radioButtons = cmds.radioCollection(self.grade_radio_collection, query = True, collectionItemArray = True)
for butn in radioButtons:
if cmds.radioButton(butn, query=True, select = True):
selected_letter = cmds.radioButton(butn, query = True, label = True)
break
if selected_letter == '':
cmds.error('selected_letter not set.\n{}\nGrade Value: {}\n\n'.format(self.title, self.current_grade_value))
# print('selected letter: {}'.format(selected_letter))
if '+' in selected_letter:
# print('plus detected!')
# selected_letter = re.sub('+', 'plus', selected_letter)
selected_letter = selected_letter.replace('+', 'plus')
# print('new selected_letter: {}'.format(selected_letter))
cmds.scrollField(self.default_comments, edit = True, text = self.subcatXML.find('gradeComment').find(selected_letter).text)
self.current_default_comment_text = cmds.scrollField(self.default_comments, query = True, text = True)
self.log('Default Comments Updated')
self.log(self.current_default_comment_text)
def append_session_commment(self):
self.log('append session comment stuff')
def close_command(*args):
self.log('close command')
# maya.utils.executeDeferred("cmds.deleteUI('ASCW')")
def get_comment(*args):
self.log('get comment')
title = cmds.textField(comment_title, query = True, text = True)
comment = cmds.scrollField(comment_text, query = True, text = True)
self.log('\nTitle: {}\nComment: {}'.format(title, comment))
if title != 'Comment Title' and comment != 'Type your comment text here...':
cmds.menuItem(parent = self.rmb_menu, label = title, command = lambda args, i = comment:self.add_comment_to_comments(i))
cmds.deleteUI('ASCW')
reorder_comments()
else:
cmds.error('Type in a comment title and comment text to continue.\nClose the window to cancel.')
self.add_comment_to_comments(comment)
def reorder_comments(*args):
self.log('reorder comments')
comment_items = cmds.popupMenu(self.rmb_menu, query = True, itemArray = True)
comment_items[-1], comment_items[-2] = comment_items[-2], comment_items[-1]
comment_labels_commands = []
for i in comment_items:
l = cmds.menuItem(i, query = True, label = True)
c = cmds.menuItem(i, query = True, command = True)
comment_labels_commands.append((l,c))
cmds.popupMenu(self.rmb_menu, edit = True, deleteAllItems = True)
for i in comment_labels_commands:
cmds.menuItem(label = i[0], command = i[1], parent = self.rmb_menu)
self.log('make comment window')
window_widthHeight = (250, 200)
padding = 2
#if ASCW window exists delete it
if (cmds.window('ASCW', exists = True)):
cmds.deleteUI('ASCW')
comment_window = cmds.window('ASCW', title = 'Append Session Comment',
width = window_widthHeight[0],
height = window_widthHeight[1],
closeCommand = close_command)
comment_form = cmds.formLayout(numberOfDivisions = 250)
comment_title = cmds.textField(text = 'Comment Title')
comment_text = cmds.scrollField(editable = True, wordWrap = True, text = 'Type your comment text here...')
comment_btn = cmds.button(label = 'Append Comment', command = get_comment)
cmds.setParent('..')
cmds.formLayout(comment_form, edit = True, attachForm = [
(comment_title, 'left', padding),
(comment_title, 'right', padding),
(comment_title, 'top', padding),
(comment_text, 'left', padding),
(comment_text, 'right', padding),
(comment_btn, 'left', padding),
(comment_btn, 'right', padding),
(comment_btn, 'bottom', padding)],
attachControl = [
(comment_text, 'top', padding, comment_title),
(comment_text, 'bottom', padding, comment_btn)])
cmds.showWindow(comment_window)
def add_comment_to_comments(self, comment, *args):
self.log('add comment to comments')
text_bucket = cmds.scrollField(self.comments_text_field, query = True, text = True)
# self.log('index: {}'.format(index))
self.log('RMB: {}'.format(self.rmb))
text_bucket += ' {}'.format(comment)
cmds.scrollField(self.comments_text_field, edit = True, text = text_bucket)
self.update_subcategory('comments_text')
def add_selected_to_examples(self, *args):
"""
add selected objects to the example text fields
"""
self.log('Boom. Adding Selected to examples')
text_bucket = ''
selection = cmds.ls(selection = True, long = True)
self.log('selection is: %s' % selection)
text_bucket = cmds.scrollField(self.example_comments, query = True, text = True)
if text_bucket:
self.log('text_bucket is TRUE:: %s' % text_bucket)
for selected in selection:
text_bucket += ( ", " + selected)
else:
for selected in selection:
text_bucket += (selected + ', ')
text_bucket = text_bucket.rstrip(', ')
cmds.scrollField(self.example_comments, edit = True, text = text_bucket)
self.update_subcategory('example_comments_text')
def update_is_complete(self, reset = False):
self.log('updating "is_complete"')
if reset:
self.is_complete = False
self.log('is_complete: reset')
elif self.current_grade_value is 0 and self.current_default_comment_text is '':
self.is_complete = False
self.log('not complete')
else:
self.is_complete = True
self.log('is_complete now TRUE')
def what_is_the_grade(self):
"""
collect grade from subcategory and return
"""
return_dict = {
'section_title': self.title,
'section_weight': self.weight,
'grade_value' : self.current_grade_value,
'comment_text' : self.current_comment_text,
'default_comments_text' : self.current_default_comment_text,
'example_comments_text' : self.current_example_comment_text,
'is_complete': self.is_complete
}
return return_dict
def this_is_the_grade(self, grade_to_set):
"""
take an input dictionary and populate all the grade fields accordingly
"""
cmds.intField(self.grade_intField, edit = True, value = grade_to_set['grade_value'])
self.update_subcategory('intField')
if grade_to_set['grade_value'] is not '':
cmds.scrollField(self.comments_text_field, edit = True, text = grade_to_set['comment_text'])
self.update_subcategory('comments_text')
if grade_to_set['default_comments_text'] is not '':
cmds.scrollField(self.default_comments, edit = True, text = grade_to_set['default_comments_text'])
self.update_subcategory('default_comments_text')
if grade_to_set['example_comments_text'] is not '':
cmds.scrollField(self.example_comments, edit = True, text = grade_to_set['example_comments_text'])
self.update_subcategory('example_comments_text')
self.auto_flagged_list = grade_to_set.get('examples', [])
self.log('auto_flagged_list updated: \n{}'.format(self.auto_flagged_list))
def reset(self):
cmds.intField(self.grade_intField, edit = True, value = 0)
self.update_subcategory('intField')
cmds.scrollField(self.comments_text_field, edit = True, text = '')
self.update_subcategory('comments_text')
cmds.scrollField(self.default_comments, edit = True, text = '')
self.update_subcategory('default_comments_text')
cmds.scrollField(self.example_comments, edit = True, text = '')
self.update_subcategory('example_comments_text')
self.log('reset subsection: {}'.format(self.title))
self.log('Selection hidden radio')
cmds.radioButton(self.resetRadioButton, edit = True, select = True)
self.log('hidden radio selected')
self.is_complete = False
#collapse frames
cmds.frameLayout(self.subcat_comments_frame_layout, edit = True, collapse = True)
cmds.frameLayout(self.example_frameLayout, edit = True, collapse = True)
cmds.frameLayout(self.default_comments_frameLayout, edit = True, collapse = True)
def update(self):
self.current_grade_value = cmds.intField(self.grade_intField, query = True, value = True)
self.current_default_comment_text = cmds.scrollField(self.default_comments, query = True, text = True)
self.current_example_comment_text = cmds.scrollField(self.example_comments, query = True, text = True)
self.current_comment_text = cmds.scrollField(self.comments_text_field, query = True, text = True)
def disable(self):
cmds.columnLayout(self.subcat_main_column_layout, edit = True, enable = False)
def enable(self):
cmds.columnLayout(self.subcat_main_column_layout, edit = True, enable = True)
def log(self, message, prefix = '.:subcategory_class::', hush = True):
"""
print stuff yo!
"""
if not hush:
print "%s: %s" % (prefix, message)
class MainCategoryGradeSection(object):
def __init__(self, mainCategoryFromXML, defaultsFromXML, updateFunction):
    """Build the GUI for one main rubric category and its subcategories.

    Creates the High Notes frame (plus an optional Gut Check frame),
    then one SubcategoryGradeSection per <subcategory> child element.
    Statement order matters: each cmds.* call parents into the layout
    created before it.
    """
    maincategory_width = 200
    scrollField_height = 100
    row_spacing = 0
    self.current_highnote_comment_text = ''
    self.current_grade_value = 0
    # Callback into the parent tool, fired whenever a subcategory changes.
    self.updatePGS = updateFunction
    self.log("Main Category Initializing")
    self.maincategory = mainCategoryFromXML
    self.defaults = defaultsFromXML
    self.log('\n\nGutCheck:')
    # Optional 'gutCheck' flag; stays None when the element is absent.
    self.gutCheck = None
    if self.maincategory.find('gutCheck') is not None:
        self.gutCheck = self.maincategory.find('gutCheck').text
    self.log('{}\n\n'.format(self.gutCheck))
    # Right-mouse-button canned comments: list of [title, text].
    self.rmb = []
    if self.maincategory.findall('RMB'):
        for item in self.maincategory.findall('RMB'):
            self.rmb.append([item.get('title'), item.text])
    self.log('\nRiGHT HERE!')
    if self.rmb != []:
        for item in self.rmb:
            self.log('\nTitle: {}\n{}'.format(item[0], item[1]))
    self.log('RMB: {}'.format(self.rmb))
    # Title/weight may live either in XML attributes or child elements.
    self.title = self.maincategory.get('title')
    if self.title == None:
        self.title = self.maincategory.find('title').text
    self.weight = self.maincategory.get('weight')
    if self.weight == None:
        self.weight = self.maincategory.find('weight').text
    self.log('{} Category Weight: {}'.format(self.title, self.weight))
    # Sections start disabled until the tool enables them.
    self.maincat_main_column_layout = cmds.columnLayout(columnWidth = maincategory_width, rowSpacing = row_spacing, enable = False)
    self.mainFrameLayout = cmds.frameLayout(label = 'High Notes',collapsable = True, collapse = True, width = maincategory_width)
    if self.gutCheck == 'True':
        self.log('running gut check GUI stuff')
        self.gutCheckFrameLayout = cmds.frameLayout(label = 'Gut Check', collapsable = True, collapse = True, width = maincategory_width)
        # gutCheckWindowGo is defined elsewhere in this class (outside
        # this method) -- it populates the Gut Check frame.
        self.gutCheckWindowGo()
        cmds.setParent(self.mainFrameLayout)
    self.highnote_comments = cmds.scrollField(width = maincategory_width, height = scrollField_height, wordWrap = True, changeCommand = lambda *args: self.update_maincategory('highnotes', *args))
    self.rmb_menu = cmds.popupMenu(parent = self.highnote_comments, button = 3)
    if self.rmb != []:
        for item in self.rmb:
            self.log('{}:{}'.format(item[0], item[1]))
            # Bind item text via default arg to avoid late-binding closures.
            cmds.menuItem(label = item[0], command = lambda args, i = item[1]:self.add_comment_to_comments(i))
    cmds.menuItem(label = 'Append session comment', command = lambda *args: self.append_session_commment())
    cmds.setParent(self.mainFrameLayout)
    cmds.setParent(self.maincat_main_column_layout)
    subcatColumnLayout = cmds.columnLayout()
    # self.subcategories holds the SubcategoryGradeSection objects;
    # self.subcats holds the raw XML elements.
    self.subcategories = []
    self.log('main category title: %s' % self.title)
    self.subcats = self.maincategory.findall('subcategory')
    self.log('subcategories found: %s' % self.subcats)
    for sub in self.subcats:
        # NOTE(review): self.runAuto is never assigned in this __init__;
        # presumably it is a class attribute or set elsewhere -- confirm,
        # otherwise this raises AttributeError.
        self.subcategories.append(SubcategoryGradeSection(sub, self.defaults, self.updatePGS, self.runAuto))
    cmds.setParent(subcatColumnLayout)
    cmds.setParent(self.maincat_main_column_layout)
    def append_session_commment(self):
        """Open a small window that lets the grader define a new canned comment
        and append it to the high-notes right-mouse-button menu."""
        self.log('append session comment stuff')
        def close_command(*args):
            # Called by Maya when the ASCW window is closed.
            self.log('close command')
            # maya.utils.executeDeferred("cmds.deleteUI('ASCW')")
        def get_comment(*args):
            # Validate the fields, add the comment as a new RMB menu item,
            # then tear the window down.
            self.log('get comment')
            title = cmds.textField(comment_title, query = True, text = True)
            comment = cmds.scrollField(comment_text, query = True, text = True)
            self.log('Title: {}\nComment: {}'.format(title, comment))
            if title != 'Comment Title' and comment != 'Type your comment text here...':
                cmds.menuItem(parent = self.rmb_menu, label = title, command = lambda args, i = comment:self.add_comment_to_comments(i))
                cmds.deleteUI('ASCW')
            else:
                cmds.error('Type in a comment title and comment text to continue.\nClose the window to cancel.')
        self.log('make comment window')
        window_widthHeight = (250, 200)
        padding = 2
        # if an ASCW window already exists, delete it before rebuilding
        if (cmds.window('ASCW', exists = True)):
            cmds.deleteUI('ASCW')
        comment_window = cmds.window('ASCW', title = 'Append Session Comment',
                                    width = window_widthHeight[0],
                                    height = window_widthHeight[1],
                                    closeCommand = close_command)
        comment_form = cmds.formLayout(numberOfDivisions = 250)
        comment_title = cmds.textField(text = 'Comment Title')
        comment_text = cmds.scrollField(editable = True, wordWrap = True, text = 'Type your comment text here...')
        comment_btn = cmds.button(label = 'Append Comment', command = get_comment)
        cmds.setParent('..')
        # Anchor title to the top, button to the bottom, text between them.
        cmds.formLayout(comment_form, edit = True, attachForm = [
                                                (comment_title, 'left', padding),
                                                (comment_title, 'right', padding),
                                                (comment_title, 'top', padding),
                                                (comment_text, 'left', padding),
                                                (comment_text, 'right', padding),
                                                (comment_btn, 'left', padding),
                                                (comment_btn, 'right', padding),
                                                (comment_btn, 'bottom', padding)],
                                            attachControl = [
                                                (comment_text, 'top', padding, comment_title),
                                                (comment_text, 'bottom', padding, comment_btn)])
        cmds.showWindow(comment_window)
    def add_comment_to_comments(self, comment, *args):
        """Append a canned comment string to the high-notes scroll field."""
        self.log('add comment to comments')
        text_bucket = cmds.scrollField(self.highnote_comments, query = True, text = True)
        self.log('RMB: {}'.format(self.rmb))
        # Append with a leading space, push back to the UI, then re-cache state.
        text_bucket += ' {}'.format(comment)
        cmds.scrollField(self.highnote_comments, edit = True, text = text_bucket)
        self.update_maincategory('highnotes')
    def enable(self):
        """Enable this section's column layout and all of its subcategories."""
        self.log('enable the section')
        cmds.columnLayout(self.maincat_main_column_layout, edit = True, enable = True)
        for sub in self.subcategories:
            sub.enable()
    def disable(self):
        """Disable this section's column layout and all of its subcategories."""
        self.log('disable the section')
        cmds.columnLayout(self.maincat_main_column_layout, edit = True, enable = False)
        for sub in self.subcategories:
            sub.disable()
        self.log('did it work?')
    def gutCheckGo(self, *args):
        """Disable the section's controls and open the gut-check input UI."""
        self.log('gut check')
        self.disable()
        self.gutCheckWindowGo()
    def gutCheckWindowGo(self, *args):
        """Build the gut-check input UI: a linked int field + slider and a
        Commit button (built in the current parent layout, not a window)."""
        self.log('gut check window')
        elem_width = 200
        row_spacing = 0
        # self.gutCheckWindowElement = cmds.window(title = '{} Gut Check'.format(self.title), width = 215, height = 100, closeCommand = self.gutCheckCancel)
        self.gutCheckWindow = cmds.columnLayout(columnWidth = elem_width, rowSpacing = row_spacing)
        cmds.text(label = 'Gut Check Input', align = 'left')
        gutCheck_int_field_slider_row_layout = cmds.rowLayout(numberOfColumns = 2)#int_field_slider_row_layout
        # Field holds 0..150; the slider mirrors it on a negated -100..0 range.
        self.gutCheck_grade_intField = cmds.intField( minValue=0, maxValue=150, step=1 , width = elem_width/6,
                                                    changeCommand = lambda *args: self.gutCheck_update('field'))
        self.gutCheck_grade_slider = cmds.intSlider( min=-100, max=0, value=0, step=1, width = elem_width*5/6, changeCommand = lambda *args: self.gutCheck_update('slider'), dragCommand = lambda *args: self.gutCheck_update('slider'))
        cmds.setParent('..')
        cmds.button(label = 'Commit', command = self.gutCheckSet, width = elem_width)
        cmds.setParent('..')
        # cmds.showWindow(self.gutCheckWindowElement)
    def gutCheck_update(self, controlType):
        """Keep the int field and slider in sync; the slider stores the
        negated value of the field (slider range is -100..0)."""
        self.log('gut check update')
        if controlType == 'slider':
            value = cmds.intSlider(self.gutCheck_grade_slider, query = True, value = True)
            cmds.intField(self.gutCheck_grade_intField, edit = True, value = -value)
        elif controlType == 'field':
            value = cmds.intField(self.gutCheck_grade_intField, query = True, value = True)
            cmds.intSlider(self.gutCheck_grade_slider, edit = True, value = -value)
    def gutCheckSet(self, *args):
        """Commit the gut-check value to every subcategory, then reset the UI."""
        self.log('gut check set')
        value = cmds.intField(self.gutCheck_grade_intField, query = True, value = True)
        for sub in self.subcategories:
            sub.gutCheck_update(value)
        # cmds.deleteUI(self.gutCheckWindowElement)
        # self.enable()
        self.gutCheckReset()
    def gutCheckReset(self, *args):
        """Zero the gut-check controls and collapse both frame layouts."""
        self.log('gut check reset!')
        cmds.intField(self.gutCheck_grade_intField, edit = True, value = 0)
        self.gutCheck_update('field')
        cmds.frameLayout(self.gutCheckFrameLayout, edit = True, collapse = True)
        cmds.frameLayout(self.mainFrameLayout, edit = True, collapse = True)
    def gutCheckCancel(self, *args):
        """Abort the gut check and re-enable the section's controls."""
        self.log('gut check cancel')
        # cmds.deleteUI(self.gutCheckWindowElement)
        self.enable()
    def autoProGo(self):
        """Run the auto-grading script for every subcategory when the XML
        defaults enable automation, then refresh the progress display."""
        self.log('defaults: {}'.format(self.defaults))
        self.log('defaults.findall("auto"): {}'.format(self.defaults.findall('auto')[0]))
        self.log('!!! autoText: {}'.format(self.defaults.findall('auto')[0].text))
        # NOTE(review): any non-empty text is truthy here, even 'False' --
        # runAuto() does the stricter == 'True' check per subcategory.
        if self.defaults.findall('auto')[0].text :
            for sub in self.subcategories:
                try:
                    self.runAuto(self.defaults, sub)
                except RuntimeError as e:
                    cmds.warning('Error running automation. Skipping section: {}\n{}'.format(sub.title, e))
        self.updatePGS()
    def runAuto(self, defaultsFromXML, single_subcat, auto = False):
        """Run the automation script linked to one subcategory.

        defaultsFromXML: XML element with the <auto> switch (and optional
            'folder' attribute naming a sub-package of pabuito_auto).
        single_subcat: the SubcategoryGradeSection to grade.
        auto: when False, first decide from the XML whether automation is
            enabled for both the defaults and this subcategory.
        """
        # this section handles all the automation linking on the subcategories
        subcatXMLElement = single_subcat.subcatXML
        if not auto:
            try:
                self.log("Lets try")
                self.log(defaultsFromXML.find('auto').text)
                self.log('subcatXMLElement:')
                # The title may be an attribute or a child element.
                sub_title1 = subcatXMLElement.get('title')
                if sub_title1 == None:
                    sub_title1 = subcatXMLElement.find('title').text
                self.log('subcat title: %s' % sub_title1)
                # self.log(subcatXMLElement.find('auto').text)
                self.log("did those last two print?")
                subcat_auto = None
                if subcatXMLElement.find('auto') != None:
                    subcat_auto = subcatXMLElement.find('auto').text
                if subcat_auto == '' or subcat_auto == None:
                    subcat_auto = False
                # Automation runs only when both the defaults and the
                # subcategory opt in.
                if (defaultsFromXML.find('auto').text == 'True') and subcat_auto:
                    self.log('auto.text is %s' % subcatXMLElement.find('auto').text)
                    auto = True
            except AttributeError as e:
                self.log('AttributeError for Auto test: \n{}'.format(e))
                # cmds.warning('AttributeError: {}'.format(sys.exc_info()[2].tb_lineno))
                cmds.warning('AttributeError: {}: Line {}'.format(e,sys.exc_info()[2].tb_lineno))
                pass
        if auto:
            self.log(subcatXMLElement.find('auto').text)
            autoScriptName = subcatXMLElement.find('auto').text
            self.log('auto is True!!!')
            import pabuito_auto as autoRun
            # reload(autoRun)
            # autoScripts = dir(autoRun)
            # An optional 'folder' attribute selects a sub-package to search.
            folder_name = defaultsFromXML.find('auto').get('folder')
            if folder_name != None:
                autoRun = getattr(autoRun, folder_name)
            autoScripts = dir(autoRun)
            self.log('Methods in auto run are \n %s' % autoScripts)
            # Filter out the module's own dunder attributes.
            defaultMethods = defaultsList = ['__builtins__', '__doc__', '__file__', '__name__', '__package__', '__path__']
            autoScriptModules = []
            for method in autoScripts:
                if method not in defaultMethods:
                    autoScriptModules.append(method)
            self.log('autoScriptModules: %s' % autoScriptModules)
            self.log('autoScriptName: %s' % autoScriptName)
            if autoScriptName in autoScriptModules:
                self.log('Found AutoScript!')
                # Convention: module <name> exposes a function <name>(defaults).
                returnDict = getattr(getattr(autoRun, autoScriptName), autoScriptName)(self.defaults)
                self.log(returnDict)
                sub_title = subcatXMLElement.get('title')
                if sub_title == None:
                    sub_title = subcatXMLElement.find('title').text
                returnDict['section_title'] = sub_title
                self.log('section_title: %s' % returnDict['section_title'])
                single_subcat.this_is_the_grade(returnDict)
            else:
                self.log('Failed to find autoScriptName')
                cmds.warning('Failed to find autoScriptName: {}'.format(autoScriptName))
        else:
            self.log('FALSE FALSE FALSE')
def update_maincategory(self, section, *args):
self.log('updating %s' % section)
if section is 'highnotes':
self.current_highnote_comment_text = cmds.scrollField(self.highnote_comments, query = True, text = True)
    def update(self):
        """Re-cache the high-notes text and update every subcategory."""
        self.current_highnote_comment_text = cmds.scrollField(self.highnote_comments, query = True, text = True)
        for subcat in self.subcategories:
            subcat.update()
def check_grade_status(self):
self.log('checking %s section grade status' % self.title)
currentGrade = 0
catWeightAndValue = []
for subcat in self.subcategories:
catWeightAndValue.append((subcat.weight, subcat.current_grade_value))
for cat in catWeightAndValue:
currentGrade += ((float(cat[0])/100)*float(cat[1]))
return (self.title, self.weight, currentGrade)
def what_is_the_grade(self):
self.log('collect grades from subsections')
return_list = []
return_list.append(self.title)
return_list.append(self.weight)
return_list.append(self.current_highnote_comment_text)
sectionGradeTotal = 0
subGradeList = []
for sub in self.subcategories:
subGradeList.append(sub.what_is_the_grade())
self.log('Grade weight and value: {} * {}'.format(sub.what_is_the_grade()['grade_value'], sub.what_is_the_grade()['section_weight']))
sectionGradeTotal += (sub.what_is_the_grade()['grade_value'] * (float(sub.what_is_the_grade()['section_weight'])/100.0))
return_list.append(sectionGradeTotal)
return_list.append(subGradeList)
return return_list
def this_is_the_grade(self, gradeList):
sectionGrades = gradeList
i = 0
for item in sectionGrades:
self.log('index {} of sectionGrades: {}'.format(i, item))
i+=1
self.log('\n\nStill needs to set high notes\n\n')
cmds.scrollField(self.highnote_comments, edit = True, text = sectionGrades[2])
self.update_maincategory('highnotes')
self.log('section[3]:\n{}'.format(sectionGrades[4]))
for sub in self.subcategories:
for index in sectionGrades[4]:
if sub.title is index['section_title']:
sub.this_is_the_grade(index)
def are_you_complete(self):
incomplete_titles = []
for sub in self.subcategories:
self.log("Testing sub for complete-ness: %s" % sub.title)
if not sub.is_complete:
self.log('adding {} to incomplete_titles'.format(sub.title))
incomplete_titles.append(sub.title)
# return False
self.log('incomplete_titles: \n{}'.format(incomplete_titles))
return incomplete_titles
# return True
    def reset(self):
        """Clear the high-notes field, collapse the frame and reset all
        subcategories back to their initial state."""
        self.log('resetting main section')
        cmds.scrollField(self.highnote_comments, edit = True, text = '')
        cmds.frameLayout(self.mainFrameLayout, edit = True, collapse = True)
        # self.disable()
        for sub in self.subcategories:
            sub.reset()
        self.update_maincategory('highnotes')
        self.log('Main section {} reset'.format(self.title))
    def log(self, message, prefix = '.:main_category_class::', hush = True):
        """
        Print a prefixed debug message; silent unless hush is False.
        """
        if not hush:
            print "%s: %s" % (prefix, message)
10,111 | d9aa30b8557d9797b9275b5d3dfe2cb31d4755c8 | #!/usr/bin/env python
import numpy as np
import sys
import os
import argparse as ARG
import pylab as plt
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), '..')))
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), '.')))
import fqlag
from sim_psd import simulate_light_curves
try:
import aztools as az
except:
raise ImportError(('aztools was not found. Please download from '
'https://github.com/zoghbi-a/aztools'))
def plot_psdf(pfits, fits, psd_model, input_pars=None):
    """Summarize a psd simulation with a model panel and parameter histograms.

    Args:
        pfits: array of shape (nsim, 2*npar+1): fitted parameters, their
            errors, and a trailing log-likelihood column.
        fits: array of shape (nsim+1, nfq); row 0 is the frequency grid,
            the rest are the fitted model curves.
        psd_model: (frequency, psd) arrays of the input model.
        input_pars: optional list of true parameter values to overplot;
            use None for entries to skip.
    """
    mod_fq, mod_psd = psd_model
    # Split (parameters | errors) and drop the log-likelihood column.
    pars, perr = np.split(pfits[:,:-1], 2, axis=1)
    npar = pars.shape[1]
    # Median and 1-sigma quantiles of the fitted parameters and curves.
    fit_q = np.quantile(pars, [0.5, 0.1587, 0.8413], 0)
    fit_p, fit_sd = np.median(pars, 0), np.std(pars, 0)
    fit_e = np.mean(perr, 0)
    m_fq, m_mod = fits[0], fits[1:]
    m_mod = np.log(m_mod)
    m_q = np.quantile(m_mod, [0.5, 0.1587, 0.8413], 0)
    m_p, m_sd = np.median(m_mod, 0), np.std(m_mod, 0)
    fig, ax = plt.subplots(1,1+pars.shape[1],figsize=(10,4))
    # Left panel: input model against the 1-sigma band of the fitted curves.
    ax[0].semilogx(mod_fq, mod_psd, label='model', color='C0')
    ax[0].fill_between(m_fq, m_q[1], m_q[2], alpha=0.5, color='C1')
    # One histogram per parameter, with the fitted median and optional truth.
    for ii in range(npar):
        h = ax[ii+1].hist(pars[:,ii], 20, density=1, alpha=0.5)
        ax[ii+1].plot([fit_q[0,ii]]*2, [0, h[0].max()])
        # BUG FIX: guard the default input_pars=None before indexing; the old
        # code raised TypeError whenever the argument was omitted.
        if input_pars is not None and input_pars[ii] is not None:
            ax[ii+1].plot([input_pars[ii]]*2, [0, h[0].max()], lw=3)
    plt.tight_layout()
def fit_psdf(fql, model, lc, sim_extra, suff, Dt=None, input_pars=None):
    """Calculated log psd for a set of simulated light curves

    Args:
        fql: array of frequency boundaries
        model: [mod, p0] mod: string for built-in model in psdf,
            p0: starting parameters for the model
        lc: array of simulated light curves of shape (nsim, 3, nt)
        sim_extra: dict from simulate_light_curves
        suff: e.g. '1' so files are saved as psdf__1.*
        Dt: if not None, apply aliasing correction
        input_pars: a list of input parameters used in the simulaion
            to compare the results to, Use None for parameter to skip.
            Used mainly for plotting

    Side effects: saves figures/psdf__<suff>.png and npz/psdf__<suff>.npz.
    """
    mod, p0 = model
    p0 = np.array(p0)
    fits, pfits = [], []
    # Fit each simulated light curve independently with the same model.
    for tarr,yarr,yerr in lc:
        pmod = fqlag.Psdf(tarr, yarr, yerr, fql, mod, dt=Dt)
        pfit = fqlag.misc.maximize(pmod, p0)
        pfits.append(np.concatenate([pfit[0], pfit[1], [-pfit[2].fun]]))
        fits.append(pmod.psd_func(pmod.fq, pfit[0]))

    # shape: nsim, 2*npar+1 (pars, perr, loglikelihod)
    pfits = np.array(pfits)
    fits = np.array(fits)
    # Prepend the frequency grid as row 0 of the fitted curves.
    fits = np.r_[[pmod.fq], fits]

    psd_model = sim_extra['psd_model']
    psd_model[1] = np.log(psd_model[1])
    plot_psdf(pfits, fits, psd_model, input_pars)
    os.system('mkdir -p figures')
    plt.savefig('figures/psdf__%s.png'%suff)
    os.system('mkdir -p npz')
    np.savez('npz/psdf__%s.npz'%suff, fits=fits, fql=fql, sim_data=sim_extra)
def psdf_1(**kwargs):
    """input powerlaw psd; no gaps, no alias, noleak, gauss noise, fit with pl,
    no extended frequency, NFQ=8"""

    # fqlag parameters #
    n    = 2**8
    dt   = 1.0
    # Frequency range spans the observed band exactly: 1/T .. Nyquist.
    fql  = np.array([1./(dt*n), 0.5/dt])

    lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100)
    model = ['pl', [-5, -2]]
    # Compare against the input parameters (normalization in log units).
    inP = extra['input_psd'][1]
    inP[0] = np.log(inP[0])
    fit_psdf(fql, model, lc, extra, '1', input_pars=inP)
def psdf_2(**kwargs):
    """input powerlaw psd; no gaps, no alias, noleak, gauss noise, fit with pl,
    EXTEND frequency, NFQ=8"""

    # fqlag parameters #
    n    = 2**8
    dt   = 1.0
    # Extended frequency range: half the lowest and double the Nyquist bound.
    fql  = np.array([.5/(dt*n), 1./dt])

    lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100)
    model = ['pl', [-5, -2]]
    inP = extra['input_psd'][1]
    inP[0] = np.log(inP[0])
    fit_psdf(fql, model, lc, extra, '2', input_pars=inP)
def psdf_3(**kwargs):
    """input powerlaw psd; no gaps, no alias, noleak, gauss noise, fit with pl,
    EXTEND frequency, NFQ=24"""

    # NOTE(review): the body is identical to psdf_2 -- the NFQ=24 mentioned in
    # the docstring is never passed anywhere; confirm whether a parameter is
    # missing here.
    # fqlag parameters #
    n    = 2**8
    dt   = 1.0
    fql  = np.array([.5/(dt*n), 1./dt])

    lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100)
    model = ['pl', [-5, -2]]
    inP = extra['input_psd'][1]
    inP[0] = np.log(inP[0])
    fit_psdf(fql, model, lc, extra, '3', input_pars=inP)
def psdf_4(**kwargs):
    """input bkn pl psd; no gaps, no alias, noleak, gauss noise, fit with bpl,
    EXTEND frequency"""

    # fqlag parameters #
    n    = 2**8
    dt   = 1.0
    fql  = np.array([.5/(dt*n), 1./dt])

    # Input model: broken powerlaw [norm, index1, index2, break frequency].
    lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100,
                    input_psd=['broken_powerlaw', [1e-4, -1, -2, 3e-2]])
    model = ['bpl', [-5, -2, -3]]
    # Fit parameters are [log norm, index2, log break] for comparison.
    inP = extra['input_psd'][1]
    inP = [np.log(inP[0]), inP[2], np.log(inP[3])]
    fit_psdf(fql, model, lc, extra, '4', input_pars=inP)
def psdf_5(**kwargs):
    """input bpl psd; no gaps, no alias, noleak, gauss noise, fit with bpl,
    EXTEND frequency"""

    # fqlag parameters #
    n    = 2**8
    dt   = 1.0
    fql  = np.array([.5/(dt*n), 1./dt])

    # Input model: bending powerlaw [norm, index, bend frequency].
    lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100,
                    input_psd=['bending_powerlaw', [1e-4, -2, 3e-3]])
    model = ['bpl', [-5, -2, -5]]
    # Fit parameters are [log norm, index, log bend] for comparison.
    inP = extra['input_psd'][1]
    inP = [np.log(inP[0]), inP[1], np.log(inP[2])]
    fit_psdf(fql, model, lc, extra, '5', input_pars=inP)
if __name__ == '__main__':
    # CLI: each --psdf_N flag runs the corresponding simulation scenario;
    # multiple flags may be combined in one invocation.
    parser = ARG.ArgumentParser(
        description="Run simulations for the PSD calculation",
        formatter_class=ARG.ArgumentDefaultsHelpFormatter)

    parser.add_argument('--psdf_1', action='store_true', default=False,
            help="psd modeling with pl model simulation.")
    parser.add_argument('--psdf_2', action='store_true', default=False,
            help="psd modeling with pl, extend freq.")
    parser.add_argument('--psdf_3', action='store_true', default=False,
            help="psd modeling with pl, extend freq. nfq=24")
    parser.add_argument('--psdf_4', action='store_true', default=False,
            help="psd modeling with pl, extend freq.. bkn pl input")
    parser.add_argument('--psdf_5', action='store_true', default=False,
            help="psd modeling with pl, extend freq.. bpl input")

    # process arguments #
    args = parser.parse_args()

    # powerlaw psd, log; no gaps, no alias, noleak, gauss noise, no extra freq #
    if args.psdf_1: psdf_1()

    # powerlaw psd, log; no gaps, no alias, noleak, gauss noise, EXTENDED freq #
    if args.psdf_2: psdf_2()

    # powerlaw psd, log; no gaps, no alias, noleak, gauss noise, EXTENDED freq; nfq=24 #
    if args.psdf_3: psdf_3()

    # bkn pl psd, log; no gaps, no alias, noleak, gauss noise, EXTENDED freq; #
    if args.psdf_4: psdf_4()

    # bpl psd, log; no gaps, no alias, noleak, gauss noise, EXTENDED freq; #
    if args.psdf_5: psdf_5()
|
10,112 | 33c49773b2ebb081ed2436e398eaaf389720c24c | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 25 12:04:20 2019
@author: Usuario
"""
from matplotlib import pyplot as plt
import argparse
import cv2
import numpy as np
# CLI: one required --image argument holding the path to read.
ap = argparse.ArgumentParser()
ap.add_argument("-i","--image",required=True,help="Set the Path of image")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
# Split the image into its B, G and R channels.
chans = cv2.split(image)
colors = ("#0091ea","#0097a7","#d32f2f")
plt.figure()
plt.title("Flattened color Histogram")
plt.xlabel("Bins")
plt.ylabel("# of Pixels")
# Plot one 256-bin intensity histogram per channel in its own color.
for (chan,color) in zip(chans,colors):
    hist = cv2.calcHist([chan],[0],None,[256],[0,256])
    plt.plot(hist,color = color)
    plt.xlim([0,256])
plt.show()
|
10,113 | d802b6d08fc7e5b47705374aa305ee6f0b23690b | # -*- coding:UTF-8 -*-
import cx_Oracle
import datetime
import os
# Keep the client charset consistent with Oracle; otherwise INSERTed Chinese text is garbled.
os.environ['NLS_LANG'] = 'AMERICAN_AMERICA.ZHS16GBK'
print('====beging...')
# Gather the data that needs to be evaluated.
startTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
print('===========================',startTime,'============================')
DB_UserName = ""
DB_UserPwd = ""
DB_ConnectStr = ""
def Oracle_Query(SqlStr, debug=0):
    """Execute an Oracle query and return the rows as a list of tuples.

    Args:
        SqlStr: SQL text to execute.
        debug: when truthy, also print the column names and the rows.

    Returns:
        List of row tuples; empty list on failure (the error is printed,
        not raised, matching the script's best-effort style).
    """
    conn = cx_Oracle.connect(DB_UserName, DB_UserPwd, DB_ConnectStr)
    data_list = []
    cursor = conn.cursor()
    try:
        cursor.execute(SqlStr)
        # fetchall() replaces the manual fetchone() loop.
        data_list = list(cursor.fetchall())
        if debug:
            fieldnames = [field[0] for field in cursor.description]
            print(fieldnames)
            print(data_list)
        print("Query success!")
    except Exception as e:
        # Was a bare `except:`; report the reason instead of hiding it.
        print("Exec sql failed: %s" % SqlStr)
        print(e)
    finally:
        cursor.close()
        # BUG FIX: the connection was opened per call and never closed (the
        # close was commented out), leaking a session on every query.
        conn.close()
    return data_list
def Oracle_Exec(SqlStr):
    """Execute an Oracle DML/DDL statement, committing on success.

    Returns:
        True on success; rolls back and returns False on failure (the error
        is printed, not raised).
    """
    conn = cx_Oracle.connect(DB_UserName, DB_UserPwd, DB_ConnectStr)
    cursor = conn.cursor()
    try:
        cursor.execute(SqlStr)
        conn.commit()
        return True
    except Exception as e:
        # Was a bare `except:`; keep the rollback but surface the reason.
        conn.rollback()
        print("Exec sql failed: %s" % SqlStr)
        print(e)
        return False
    finally:
        cursor.close()
        # BUG FIX: close the per-call connection that was previously leaked.
        conn.close()
# Test whether a coordinate lies inside a polygon.
def is_pt_in_poly(aLon, aLat, pointList):
    """Ray-casting point-in-polygon test.

    :param aLon: longitude of the point (number or numeric string)
    :param aLat: latitude of the point (number or numeric string)
    :param pointList: list [(lon, lat), ...] of polygon vertices, in order
        (clockwise or counter-clockwise); the polygon is closed implicitly.
    :return: True when the point is inside the polygon, otherwise False
        (including degenerate polygons with fewer than 3 vertices).
    """
    count = len(pointList)
    if count < 3:
        return False
    # Convert the query point once instead of on every edge (the old code
    # redid the float() conversions inside the loop).
    aLon = float(aLon)
    aLat = float(aLat)
    crossings = 0
    for i in range(count):
        lon1 = float(pointList[i][0])
        lat1 = float(pointList[i][1])
        # Wrap around to the first vertex to close the polygon; this also
        # removes the old unreachable IndexError handler.
        j = 0 if i == count - 1 else i + 1
        lon2 = float(pointList[j][0])
        lat2 = float(pointList[j][1])
        # Does the horizontal ray through aLat cross this edge?
        if (lat1 <= aLat < lat2) or (lat2 <= aLat < lat1):
            if abs(lat1 - lat2) > 0:
                # Longitude where the edge crosses latitude aLat.
                lon_cross = lon1 - ((lon1 - lon2) * (lat1 - aLat)) / (lat1 - lat2)
                if lon_cross < aLon:
                    crossings += 1
    # Odd number of crossings to the left of the point => inside.
    return crossings % 2 != 0
# Format the column contents read from an Excel workbook.
# def get_file_row(row,file_name):
# wb_1 = openpyxl.load_workbook('%s' % file_name)
# ws_1 = wb_1.active
# colC = ws_1['%s' % row]
# list_1 = [x for x in colC]
# list_2 = []
# for i in list_1:
# list_2.append(i.value)
# return list_2
# Query base-station name, longitude and latitude.
def get_station_lo_la():
    """Return (b_station_name, longitude, latitude) rows for every base station."""
    sql_sec_name_lo_la = "SELECT A.B_STATION_NAME,A.LONGITUDE,A.LATITUDE FROM B_BASE_STATION_INFO A"
    list_station_lo_la = Oracle_Query(sql_sec_name_lo_la)
    return list_station_lo_la
# Query grid id, name and boundary points for all grids under an org, at one org level.
def get_point_2(org_id, orgLevel):
    """Return (org_code, org_name, point) rows for every active grid that sits
    below org_id in the org hierarchy and matches the given org level.

    NOTE(review): the SQL is built with %s string formatting -- safe only for
    trusted internal ids; parameterized queries would be preferable.
    """
    sql_sec_orgid_orgname_point = "select r.org_code,r.org_name,r.point " \
                                  "from (" \
                                  "select a.org_code,a.org_name,a.p_org_id,a.org_level,b.point " \
                                  "from s_orgnization a,p_map_draw b " \
                                  "where a.org_id=b.org_id " \
                                  "and b.state='00A' " \
                                  "and a.state='00A' " \
                                  "and a.org_id in ( " \
                                  "select org_id from s_orgnization start with org_id='%s' " \
                                  "connect by prior org_id=p_org_id)) r,s_orgnization o " \
                                  "where r.p_org_id=o.org_id " \
                                  "and r.org_level='%s'" % (str(org_id), str(orgLevel))
    list_orgid_orgname_point = Oracle_Query(sql_sec_orgid_orgname_point)
    return list_orgid_orgname_point
# Query grid id, name and boundary points for every level-5 grid.
def get_point():
    """Return (org_id, org_name, point) rows for every level-5 grid."""
    sql_sec_orgid_orgname_point = "select a.org_id,a.org_name,b.point from s_orgnization a " \
                                  "left join p_map_draw b on b.org_id = a.org_id" \
                                  " where a.org_level='5'"
    list_orgid_orgname_point = Oracle_Query(sql_sec_orgid_orgname_point)
    return list_orgid_orgname_point
# Parse a CLOB point field into a list of coordinate pairs.
def expand_list(tList):
    """Parse a CLOB of "lon,lat;lon,lat;..." into [[lon, lat], ...] strings.

    Args:
        tList: a LOB-like object whose read() returns the raw text.

    Returns:
        List of [lon, lat] string pairs (one inner list per ';' segment).
    """
    # A comprehension replaces the old manual index-tracking loop.
    return [segment.split(',') for segment in tList.read().split(';')]
# Update a station's grid_id in the bak table.
def update_station_grid_id(gridId, bStationName):
    """Set the grid_id of the named station in b_base_station_info_bak.

    Returns True/False from Oracle_Exec.
    """
    sqlUpdateGridId = "update b_base_station_info_bak a " \
                      "set a.grid_id='%s' " \
                      "where a.b_station_name='%s'" % (str(gridId), bStationName)
    updateResult = Oracle_Exec(sqlUpdateGridId)
    return updateResult
# Compare the grid_id of a station between the bak and mid tables.
def judge_station_name(stationName):
    """Compare a station's grid_id between the bak table and the mid table.

    Returns False when they already agree, otherwise the mid-table result
    rows (the new grid_id) so the caller can apply the update.
    """
    sqlGridIdFromInfo = " select grid_id from b_base_station_info_bak where b_station_name = '%s'" % stationName
    sqlGridIdFromMid = "select grid_id from B_STATION_GRID_MID where b_station_name = '%s'" % stationName
    gridIdFromInfo = Oracle_Query(sqlGridIdFromInfo)
    gridIdFromMid = Oracle_Query(sqlGridIdFromMid)
    if gridIdFromInfo == gridIdFromMid:
        return False
    else:
        return gridIdFromMid
# Expand the CLOB point field of each org entry in orgIdNamePoint.
def expand_orgidnamepoint(orgIdNamePoint):
    """Expand the CLOB point field (index 2) of every org entry, in place.

    Entries whose point field is None are left untouched. Returns the same
    list with expanded entries converted from tuples to lists.
    """
    # BUG FIX: the old code tracked the index with a manual counter but
    # `continue`d past the increment for None entries, so every entry after a
    # None was rewritten at the wrong index. enumerate() keeps them aligned.
    for idx, entry in enumerate(orgIdNamePoint):
        if entry[2] is not None:
            orgIdNamePoint[idx] = list(entry)
            orgIdNamePoint[idx][2] = expand_list(entry[2])
    return orgIdNamePoint
# Match stations to grids and insert the results into the intermediate table.
def in_station_mid_table(stationLoLa, orgIdNamePoint):
    """For each station, find the first grid polygon containing it and insert
    a (org_name, grid_id, b_station_name) row into b_station_grid_mid."""
    for station_name, station_lo, station_la in stationLoLa:  # station coordinates
        for ord_id, org_name, org_point in orgIdNamePoint:  # grid id, name and polygon points
            judge_result = is_pt_in_poly(station_lo, station_la, org_point)
            if judge_result:
                sql_insert_b_station_grid_mid = "insert into b_station_grid_mid (org_name,grid_id,b_station_name) " \
                                                "values ('%s','%s','%s')" % (org_name, ord_id, station_name)
                Oracle_Exec(sql_insert_b_station_grid_mid)
                # First containing grid wins; stop scanning the rest.
                break
# Sync grid_id from the intermediate table into the main (bak) table for all stations.
def updata_station_all():
    """For every station in the mid table, update the bak table's grid_id
    whenever the two tables disagree."""
    sqlSecStationNameFromMid = "select b_station_name from B_STATION_GRID_MID"
    stationNameList = Oracle_Query(sqlSecStationNameFromMid)
    for stationNameTup in stationNameList:
        # judge_station_name returns False when already in sync, else the new rows.
        gridId = judge_station_name(stationNameTup[0])
        if gridId:
            print(stationNameTup[0])
            print(gridId)
            update_station_grid_id(gridId[0][0], stationNameTup[0])
        else:
            continue
# Query cell ids and their coordinates for a given city and day.
def get_cell_lo_la(cityId, dayID):
    """Return (cell_id, longitude, latitude) rows for one city and day.

    :param cityId: city the cells belong to
    :param dayID: partition day of the B_SUBDISTRICT_INFO snapshot
    :return: list of row tuples (rows with NULL coordinates are excluded)
    """
    sqlGetCellIdLoLa = "SELECT A.CELL_ID,A.LONGITUDE,A.LATITUDE " \
                       "FROM B_SUBDISTRICT_INFO A " \
                       "WHERE CITY_ID='%s' " \
                       "AND A.LONGITUDE IS NOT NULL " \
                       "AND A.LATITUDE IS NOT NULL " \
                       "AND A.DAY_ID='%s'" % (str(cityId), str(dayID))
    listCellLoLa = Oracle_Query(sqlGetCellIdLoLa)
    return listCellLoLa
# Match cells to grids and insert the results into the intermediate table.
def in_cell_mid_table(cellLoLa, orgIdNamePoint, orgLevel, dayId, cityId):
    """For each cell, find the first grid polygon containing it and insert a
    row into b_cell_mid.

    :param cellLoLa: cell coordinate rows (cell_id, lon, lat)
    :param orgIdNamePoint: grid rows (id, name, polygon point list)
    :param orgLevel: grid org level recorded with the match
    :param dayId: partition day recorded with the match
    :param cityId: city recorded with the match
    :return: no return value
    """
    for cellId, cellLo, cellLa in cellLoLa:  # cell coordinates
        for ord_id, org_name, org_point in orgIdNamePoint:  # grid id, name and polygon points
            judge_result = is_pt_in_poly(cellLo, cellLa, org_point)
            if judge_result:
                sql_insert_b_cell_mid = "insert into b_cell_mid (day_id,org_name,grid_id,city_id,org_level,cell_id) " \
                                        "values ('%s','%s','%s','%s','%s','%s')" % (
                                            str(dayId), org_name, ord_id, str(cityId), str(orgLevel), cellId)
                Oracle_Exec(sql_insert_b_cell_mid)
                # First containing grid wins; stop scanning the rest.
                break
# Match cells to grids (level 5 first, falling back to level 4) and insert into the intermediate table.
def in_cell_mid_table_grid_id(cellLoLa, orgIdNamePoint_5, orgIdNamePoint_4, orgLevel_5, orgLevel_4,dayId, cityId):
    """Two-pass version of in_cell_mid_table: try the level-5 grids first and,
    when no level-5 polygon contains the cell, fall back to the level-4 grids.

    :param cellLoLa: cell coordinate rows (cell_id, lon, lat)
    :param orgIdNamePoint_5: level-5 grid rows (id, name, polygon point list)
    :param orgIdNamePoint_4: level-4 grid rows (id, name, polygon point list)
    :param orgLevel_5 / orgLevel_4: org levels recorded with the match
    :param dayId: partition day recorded with the match
    :param cityId: city recorded with the match
    :return: no return value
    """
    for cellId, cellLo, cellLa in cellLoLa:  # cell coordinates
        # flag records whether a level-5 grid matched this cell.
        flag = 0
        for ord_id, org_name, org_point in orgIdNamePoint_5:  # grid id, name and polygon points
            judge_result = is_pt_in_poly(cellLo, cellLa, org_point)
            if judge_result:
                sql_insert_b_cell_mid = "insert into b_cell_mid (day_id,org_name,grid_id,city_id,org_level,cell_id) " \
                                        "values ('%s','%s','%s','%s','%s','%s')" % (
                                            str(dayId), org_name, ord_id, str(cityId), str(orgLevel_5), cellId)
                Oracle_Exec(sql_insert_b_cell_mid)
                flag = 1
                break
        if flag == 0:
            # No level-5 match: retry against the coarser level-4 grids.
            for ord_id, org_name, org_point in orgIdNamePoint_4:  # grid id, name and polygon points
                judge_result = is_pt_in_poly(cellLo, cellLa, org_point)
                if judge_result:
                    sql_insert_b_cell_mid = "insert into b_cell_mid (day_id,org_name,grid_id,city_id,org_level,cell_id) " \
                                            "values ('%s','%s','%s','%s','%s','%s')" % (
                                                str(dayId), org_name, ord_id, str(cityId), str(orgLevel_4), cellId)
                    Oracle_Exec(sql_insert_b_cell_mid)
                    break
# Compare a cell's region between the info table and the mid table.
def judge_cell_id_region(cellId):
    """Compare a cell's region (info table) with its grid_id (mid table).

    Returns False when they already agree, otherwise the mid-table rows
    (the new grid_id) so the caller can apply the update.
    """
    sqlGridIdFromInfo = " select region from B_SUBDISTRICT_INFO where cell_id = '%s'" % cellId
    sqlGridIdFromMid = "select grid_id from B_CELL_MID where cell_id = '%s'" % cellId
    gridIdFromInfo = Oracle_Query(sqlGridIdFromInfo)
    gridIdFromMid = Oracle_Query(sqlGridIdFromMid)
    if gridIdFromInfo == gridIdFromMid:
        return False
    else:
        return gridIdFromMid
# Update a cell's region in B_SUBDISTRICT_INFO.
def update_cell_region(gridId, cellId):
    """Set the region of the given cell in B_SUBDISTRICT_INFO.

    Returns True/False from Oracle_Exec.
    """
    sqlUpdateGridId = "update B_SUBDISTRICT_INFO a " \
                      "set a.region='%s' " \
                      "where a.cell_id='%s'" % (str(gridId), cellId)
    updateResult = Oracle_Exec(sqlUpdateGridId)
    return updateResult
# Sync region from the intermediate table into B_SUBDISTRICT_INFO for all cells.
def updata_cell_region_all():
    """For every cell in the mid table, update the info table's region
    whenever the two tables disagree."""
    sqlSecCellIdFromMid = "select cell_id from B_CELL_MID"
    cellIdList = Oracle_Query(sqlSecCellIdFromMid)
    for cellIdTup in cellIdList:
        # judge_cell_id_region returns False when already in sync, else the new rows.
        gridId = judge_cell_id_region(cellIdTup[0])
        if gridId:
            print(cellIdTup[0])
            print(gridId)
            update_cell_region(gridId[0][0], cellIdTup[0])
        else:
            continue
|
10,114 | a7114a9f1c7342e155cde04dc0f5d1461fd08f87 | '''
Given a string s, return the longest palindromic substring in s.
-------
RESULTS
-------
Time Complexity:
Space Complexity:
''' |
10,115 | 15f11fae7dbededb6aaf3292c8e211a9b86c605d | from django.contrib import admin
from .models import Article
# Register your models here.
@admin.register(Article)
class ArticleModel(admin.ModelAdmin):
    """Admin configuration for Article: sidebar filters, list columns and a
    date drill-down on the publish date."""

    list_filter = ('subject', 'date_of_publish', 'state')
    list_display = ('subject', 'date_of_publish', 'state')
    # date_hierarchy takes a single field name -- a plain string, not a tuple.
    date_hierarchy = 'date_of_publish'
|
10,116 | a6852f294fd536d7b020db33fab561a12829837f | from django.contrib.auth.hashers import BasePasswordHasher
from collections import OrderedDict
class PlainTextPassword(BasePasswordHasher):
    """Password "hasher" that stores the password verbatim: no salt, no hash.

    For demos and tests only -- never store real passwords in plain text.
    """

    algorithm = "plain"

    def salt(self):
        # Plain text storage needs no salt.
        return ''

    def encode(self, password, salt):
        assert salt == ''
        return password

    def verify(self, password, encoded):
        return encoded == password

    def safe_summary(self, encoded):
        return OrderedDict([('algorithm', self.algorithm), ('hash', encoded)])
|
10,117 | abe111117ab67c2fbc0cba8d867937fb54b9b7da | """
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import torch
import torch.nn as nn
class WordAttNet(nn.Module):
    """Word-level encoder: embedding lookup followed by a bidirectional GRU.

    forward() returns (hidden, h_output) where `hidden` is the concatenation
    of the final forward and backward GRU states with a leading length-1 axis,
    and `h_output` is the raw GRU hidden state to feed into the next call.
    """

    def __init__(self, hidden_size, word_dict_len, embed_size):
        super(WordAttNet, self).__init__()
        self.lookup = nn.Embedding(num_embeddings=word_dict_len, embedding_dim=embed_size)
        self.gru = nn.GRU(embed_size, hidden_size, bidirectional=True)

    def forward(self, input, hidden_state):
        # input: [seq_len, batch] of word indices -> [seq_len, batch, embed_size]
        embedded = self.lookup(input)
        # Only the final hidden state is used; the per-step outputs are dropped.
        _, h_output = self.gru(embedded.float(), hidden_state)
        # Concatenate the last forward (-2) and backward (-1) direction states.
        combined = torch.cat((h_output[-2, :, :], h_output[-1, :, :]), dim=1)
        return combined.unsqueeze(0), h_output
if __name__ == "__main__":
    # BUG FIX: the constructor signature is (hidden_size, word_dict_len,
    # embed_size); the old call passed a single glove-file path, which raised
    # a TypeError. Build a small smoke-test instance with valid arguments.
    abc = WordAttNet(hidden_size=50, word_dict_len=10000, embed_size=50)
10,118 | 6523c652c98874716aa973e39d767e0915aa46fd | '''
้ฎ้ข๏ผๅฆๆไธๅ็็ถ็ฑปๅบ็ฐไบ็ธๅๅ็งฐ็ๅฑๆงๆ่
ๆนๆณ
ๅญ็ฑปไผ็ปงๆฟ่ฐ็ๅฑๆงๆ่
ๆนๆณ๏ผ
python3ไธญ้ฝๆฏๆฐๅผ็ฑป๏ผๅนฟๅบฆไผๅ
๏ผไป็ถ็ฑปไธญๆฅ่ฏขๅฏนๅบ็ๆนๆณ๏ผๆฅ่ฏขๅฐ็ฌฌไธไธชๆปก่ถณ็ๆนๆณไนๅๅฐฑ็ดๆฅ่ฟๅ
object
|
A(object)
|
A_1(A) --> A_2(A)
|
Test(A_1, A_2)
python2ไธญ็็ปๅ
ธ็ฑป๏ผๆทฑๅบฆไผๅ
A
|
A --> A_2(A)
|
A_1(A)
|
Test(A_1, A_2)
'''
class A:
    """Root class of the MRO demo diamond."""

    def test(self):
        # Marker output so the resolved method is visible at run time.
        print("aaaaaaaaaaaaaaaaa")
class A_1(A):
    """First parent of Test; defines only test1() and inherits test() from A."""

    def test1(self):
        # Marker output used to show which class won the MRO lookup.
        print("1111111111111111111")
class A_2:
    """Second parent of Test; defines its own test() (does NOT inherit A)."""

    def test(self):
        # Marker output so the resolved method is visible at run time.
        print("2222222222222222222")
# Test inherits from both branches; lookup order is the C3 linearization
# Test -> A_1 -> A_2 -> A -> object.
class Test(A_1, A_2):
    pass
t = Test()
# MRO lookup walks Test -> A_1 -> A_2 -> ...; A_1 defines only test1(), so
# A_2.test() is found first and this prints 2222222222222222222 (the original
# comment claiming 1111111111111111111 was wrong: A_1 has no test()).
t.test()
# [<class '__main__.Test'>, <class '__main__.A_1'>, <class '__main__.A_2'>, <class '__main__.A'>, <class 'object'>]
print(Test.mro())
10,119 | 3345787c7d7a7a72f1db2490cc585d39ca74240e | __author__ = 'ericdennison'
from hhscp import app
from hhscp.events import *
from hhscp.users import User, Users, ADMIN, USERSFILE
from flask import render_template
from flask import request, session
from flask import redirect
import inspect
import sys
import datetime
import tempfile
@app.route('/calendar')
def site_calendar():
    """Render the course calendar page from the Calendar's weeks."""
    cal = Calendar()
    return render_template('calendar.html', title='Computer Programming Course Calendar', weeks=cal.weeks)
@app.route('/')
def site_main():
    """Render the site landing page."""
    return render_template('main.html', title='HHS - Computer Programming')
@app.route('/assignment/<short_name>')
def site_assignment(short_name):
    """Render one assignment page, looked up by its short name."""
    c = Calendar(True) # allow all assignments
    event = c.assignments.nameDict[short_name]
    # The template gets datetime and builtins for use in its own logic.
    return render_template('assignments/' + short_name + '.html',
                           title=event.name,
                           datetime=datetime,
                           builtins=__builtins__)
@app.route('/user/<name>')
def site_user(name):
    """Show a user's page, routing to password setup or login when needed.

    NOTE(review): url_for is not among the visible flask imports at the top
    of this file -- presumably provided via the star import; verify.
    """
    u = User(name)
    if u.isadminsession() or u.authenticated():
        return render_template('user.html', title=u.longname, user=u )
    elif not u.passhash:
        # No password set yet: send the user to create one.
        return redirect(url_for('site_getnewpass', name=name))
    elif not u.authenticated():
        return redirect(url_for('site_authenticate', name=name))
    else:
        return redirect(url_for('site_users'))
@app.route('/logout')
def site_logout():
    """Log the current user out and bounce back to the user list."""
    users = Users()
    users.logout()
    return redirect(url_for('site_users'))
@app.route('/getnewpass/<name>')
def site_getnewpass(name):
    """Show the set-password page, or bounce to authentication.

    A user with no password hash yet (or an already-authenticated one) may
    set a new password; anyone else must authenticate first.
    """
    u = User(name)
    if not u.passhash or u.authenticated():
        return render_template('setpass.html', user=u)
    else:
        # BUG FIX: the redirect was built but never returned, so this view
        # fell through and returned None (a 500 in Flask).
        return redirect(url_for('site_authenticate', name=name))
@app.route('/getnewpasssubmit/<name>', methods=['POST'])
def site_getnewpasssubmit(name):
    """Accept a new password when the two entries match, then send the user
    on to authenticate with it."""
    u = User(name)
    if (not u.passhash or u.authenticated()) and request.form['pass1'] == request.form['pass2']:
        u.setpassword(request.form['pass1'])
        return redirect(url_for('site_authenticate', name=name))
    # BUG FIX: previously fell through and returned None (a 500 in Flask)
    # when the passwords differed or access was denied; send the user back
    # to the set-password page to retry.
    return redirect(url_for('site_getnewpass', name=name))
@app.route('/authenticate/<name>')
def site_authenticate(name):
    """Render the password-entry page for the named user."""
    return render_template('authenticate.html', user=User(name))
@app.route('/authenticatesubmit/<name>', methods=['POST'])
def site_authenticatesubmit(name):
    """Attempt a login with the submitted password, then show the user page
    (which itself re-routes if the login failed)."""
    u = User(name)
    if request.method == 'POST':
        u.login(request.form['pass'])
    return redirect(url_for('site_user', name=name))
@app.route('/users')
def site_users():
    """Render the list of all users."""
    all_users = Users()
    return render_template('users.html', title='Computer Programming Users', users=all_users)
@app.route('/userchallengesubmit/<username>/<challengename>', methods=['POST'])
def site_userchallengesubmit(username, challengename):
    """Accept a challenge submission (file upload or pasted text), run the
    tests against it, and save the result before showing the challenge page."""
    u = User(username)
    c = u.challenge(challengename)
    if not u.authenticated():
        return redirect(url_for('site_authenticate', name=username))
    if request.method == 'POST':
        file = request.files['file']
        # Seek to the end to learn whether an actual file was uploaded.
        file.seek(0,2)
        if not file.tell():
            # Empty upload: fall back to the pasted text from the form,
            # wrapped in a temporary file so runtest() sees a file object.
            file.close()
            file = tempfile.NamedTemporaryFile(mode='br+')
            file.write(bytes(request.form['data'], 'UTF-8'))
            file.seek(0)
        c.testcanonicalexample()
        c.runtest(file)
        c.savestate(u.datapath)
    return redirect(c.userchallengeurl(username))
@app.route('/userchallenge/<username>/<challengename>')
def site_userchallenge(username, challengename):
    """Show one user's results for one challenge (that user or admin only)."""
    person = User(username)
    challenge = person.challenge(challengename)
    if not person.authenticated() and not person.isadminsession():
        return redirect(url_for('site_authenticate', name=username))
    page_title = person.longname + ' - ' + challenge.testname
    return render_template('userchallenge.html', title=page_title,
                           user=person, challenge=challenge)
@app.route('/userchallenge/download/<username>/<challengename>')
def site_userchallengedownload(username, challengename):
    """Send the user's challenge source code as a plain-text attachment."""
    person = User(username)
    if not person.authenticated() and not person.isadminsession():
        return redirect(url_for('site_authenticate', name=username))
    challenge = person.challenge(challengename)
    response = app.make_response(challenge.testcode)
    download_name = person.shortname + challenge.testname + '.py'
    response.headers.add('Content-Disposition', 'attachment',
                         filename=download_name)
    response.mimetype = 'text/plain'
    return response
# Maps the admin-editable file keys to their on-disk paths; used by the
# admin_files / admin_filessubmit views below to whitelist which files
# may be viewed and overwritten.
# filename is one of 'users', 'holidays', 'assignments'
PATHNAMES = {'users':USERSFILE, 'holidays':HOLIDAYSFILE, 'assignments':ASSIGNMENTSFILE}
@app.route('/admin_files/<filename>')
def admin_files(filename):
    """Admin-only editor page for one of the data files in PATHNAMES.

    Non-admin sessions and unknown filenames are bounced to the main page.
    """
    u = User(ADMIN)
    if u.isadminsession() and filename in PATHNAMES:
        # Use a context manager so the handle is closed deterministically
        # (the original leaked it until garbage collection).
        with open(PATHNAMES[filename], 'r') as f:
            data = f.read()
        return render_template('editfile.html', filename=filename, data=data)
    else:
        return redirect(url_for('site_main'))
@app.route('/admin_filessubmit/<filename>', methods=['POST'])
def admin_filessubmit(filename):
    """Admin-only handler that overwrites one of the PATHNAMES data files."""
    u = User(ADMIN)
    if u.isadminsession() and filename in PATHNAMES:
        data = request.form['data']
        # Context manager ensures the data is flushed and the handle closed
        # (the original left the file handle open).
        with open(PATHNAMES[filename], 'w') as f:
            f.write(data)
    return redirect(url_for('site_main'))
# Simple UDP "ping" client: send a message and wait for a reply, doubling
# the timeout after each miss; give up once the timeout exceeds 2 seconds.
import random
from socket import *

port = 2500
BUFFER = 1024
server = "localhost"

sock = socket(AF_INET, SOCK_DGRAM)
sock.connect((server, port))

for i in range(10):
    delay = 0.1
    data = 'Hello message'
    while True:
        sock.send(data.encode())
        # BUG FIX: corrected the "secondes" typo in the user-facing message.
        print("Waiting up to {} seconds for a reply".format(delay))
        sock.settimeout(delay)
        try:
            # On success this rebinds data to the reply bytes; the for loop
            # resets it to the string on the next round.
            data = sock.recv(BUFFER)
        except timeout:
            # Exponential backoff; bail out once the server looks dead.
            delay *= 2
            if delay > 2.0:
                print("The server seems to be down")
                break
        else:
            print('Response:', data.decode())
            break
# -*- coding: utf-8 -*-
# // --- Directions
# // Check to see if two provided strings are anagrams of each other.
# // One string is an anagram of another if it uses the same characters
# // in the same quantity. Only consider characters, not spaces
# // or punctuation. Consider capital letters to be the same as lower case
# // --- Examples
# // anagrams('rail safety', 'fairy tales') --> True
# // anagrams('RAIL! SAFETY!', 'fairy tales') --> True
# // anagrams('Hi there', 'Bye there') --> False
def anagrams(s1, s2):
    """Return True if *s1* and *s2* are anagrams of each other.

    Per the directions above: comparison is case-insensitive and only
    characters count -- spaces and punctuation are ignored.
    """
    # BUG FIX: str.lower() returns a new string (the original discarded the
    # result, so case was never normalized), and spaces/punctuation were
    # included in the comparison despite the stated directions.
    letters1 = sorted(ch for ch in s1.lower() if ch.isalnum())
    letters2 = sorted(ch for ch in s2.lower() if ch.isalnum())
    if letters1 == letters2:
        print("The strings are anagrams.")
        return True
    else:
        print("The strings aren't anagrams.")
        return False
# Exercise the checker with the examples from the directions above.
for left, right in (
    ('hello', 'llohe'),
    ('A tree, a life, a bench', 'A tree, a fence, a yard'),
    ('One one', 'One one c'),
    ('Whoa! Hi!', 'Hi! Whoa!'),
    ('RAIL! SAFETY!', 'fairy tales'),
):
    anagrams(left, right)
from DateTime import DateTime
from Products.ATContentTypes.interfaces import IATEvent as IATEvent_ATCT
from Products.ATContentTypes.tests.utils import EmailValidator
from Products.ATContentTypes.tests.utils import EmptyValidator
from Products.ATContentTypes.tests.utils import NotRequiredTidyHTMLValidator
from Products.ATContentTypes.tests.utils import URLValidator
from Products.ATContentTypes.tests.utils import dcEdit
from Products.Archetypes import atapi
from Products.Archetypes.interfaces.layer import ILayerContainer
from Products.CMFCore.permissions import ModifyPortalContent
from Products.CMFCore.permissions import View
from datetime import datetime
from plone.app.event.at.content import default_end
from plone.app.event.at.content import default_start
from plone.app.event.at.interfaces import IATEvent
from plone.app.event.at.interfaces import IATEventRecurrence
from plone.app.event.base import default_timezone
from plone.app.event.testing import PAEventAT_INTEGRATION_TESTING
from plone.app.event.testing import set_env_timezone
from plone.app.testing import TEST_USER_ID
from plone.app.testing import setRoles
from plone.event.interfaces import IEvent
from plone.event.interfaces import IEventAccessor
from plone.event.interfaces import IEventRecurrence
from plone.event.utils import pydt
from plone.formwidget.datetime.at import DatetimeWidget
from plone.formwidget.recurrence.at.widget import RecurrenceValidator
from plone.formwidget.recurrence.at.widget import RecurrenceWidget
from zope.event import notify
from zope.interface.verify import verifyObject
from zope.lifecycleevent import ObjectModifiedEvent
import itertools
import pytz
import unittest2 as unittest
# Timezone used as the fixture default throughout the tests below.
TZNAME = "Europe/Vienna"
# Canonical field values applied by _edit_atevent() and checked in
# PAEventATTest.test_edit() / test_get_size().
OBJ_DATA = {
    'location': 'my location',
    'subject': 'Meeting',
    'eventUrl': 'http://example.org/',
    'startDate': DateTime(TZNAME), # Initialize with timezone, even if
    'endDate': DateTime(TZNAME) + 1, # it wouldn't be needed here.
    # It's needed for test comparison.
    'timezone': TZNAME,
    'contactName': 'John Doe',
    'contactPhone': '+1212356789',
    'contactEmail': 'john@example.org',
    'attendees': (
        'john@doe.com',
        'john@doe.org',
        'john@example.org'),
    'text': "lorem ipsum"}
class FakeRequest:
    """Minimal stand-in for a Zope request: just the two dicts that
    validation code reads (``other`` and ``form``)."""
    def __init__(self):
        self.other = {}
        self.form = {}
class PAEventAccessorTest(unittest.TestCase):
    """Exercise the IEventAccessor adapter on an Archetypes Event.

    NOTE(review): these are order-dependent integration tests; the
    notify(ObjectModifiedEvent(...)) calls trigger subscribers that
    normalize stored dates, so statement order is significant.
    """
    # Plone integration layer providing the test portal.
    layer = PAEventAT_INTEGRATION_TESTING
    def setUp(self):
        """Grab the portal and grant Manager so invokeFactory works."""
        self.portal = self.layer['portal']
        setRoles(self.portal, TEST_USER_ID, ['Manager'])
    def test_event_accessor(self):
        """Round-trip event data through IEventAccessor: date/timezone
        handling, open-end and whole-day expansion, then the remaining
        simple properties."""
        utc = pytz.utc
        vienna = pytz.timezone('Europe/Vienna')
        self.portal.invokeFactory(
            'Event',
            'event1',
            description='a description',
            startDate=datetime(2011, 11, 11, 11, 0, tzinfo=utc),
            endDate=datetime(2011, 11, 11, 12, 0, tzinfo=utc),
            timezone='UTC',
            wholeDay=False
        )
        e1 = self.portal['event1']
        acc = IEventAccessor(e1)
        # TEST DATES
        self.assertEqual(acc.start, datetime(2011, 11, 11, 11, 0, tzinfo=utc))
        self.assertEqual(acc.end, datetime(2011, 11, 11, 12, 0, tzinfo=utc))
        acc.start = datetime(2011, 11, 13, 9, 0) # tzinfo does not matter,
        acc.end = datetime(2011, 11, 13, 10, 0) # it's set by subscription
        # adapter
        # If using EventAccessor's edit method, calling notify isn't needed
        acc.edit(timezone=u'Europe/Vienna')
        # accessor should return start/end datetimes in the event's timezone
        self.assertEqual(
            acc.start,
            datetime(2011, 11, 13, 9, 0, tzinfo=vienna))
        self.assertEqual(
            acc.end,
            datetime(2011, 11, 13, 10, 0, tzinfo=vienna))
        # start/end dates are stored in UTC zone on the context, but converted
        # to event's timezone via the attribute getter.
        self.assertEqual(
            e1.end(),
            DateTime('2011/11/13 10:00:00 Europe/Vienna')
        )
        # timezone should be the same on the event object and accessor
        self.assertEqual(e1.getTimezone(), acc.timezone)
        # Open End Test: the end is pushed to the last second of the start day.
        acc.edit(open_end=True)
        self.assertEqual(
            acc.start,
            datetime(2011, 11, 13, 9, 0, tzinfo=vienna))
        self.assertEqual(
            acc.end,
            datetime(2011, 11, 13, 23, 59, 59, tzinfo=vienna))
        # Whole Day Test: start/end expand to cover the full calendar day.
        acc.edit(whole_day=True, open_end=False)
        self.assertEqual(
            acc.start,
            datetime(2011, 11, 13, 0, 0, tzinfo=vienna))
        self.assertEqual(
            acc.end,
            datetime(2011, 11, 13, 23, 59, 59, tzinfo=vienna))
        # TEST DESCRIPTION
        self.assertTrue(acc.description == 'a description')
        acc.description = 'another desc'
        self.assertTrue(acc.description == 'another desc')
        # TEST OTHER PROPERTIES
        acc.title = u"An Event"
        acc.recurrence = u'RRULE:FREQ=DAILY;COUNT=5'
        acc.location = u"Home"
        acc.attendees = [u'me', u'you']
        acc.contact_name = u"Max Mustermann"
        acc.contact_email = u"test@test.com"
        acc.contact_phone = u"+1234567890"
        acc.event_url = u"http://plone.org/"
        acc.subjects = [u"tag1", u"tag2"]
        acc.text = u"body text with <b>html</b> formating."
        # If not using EventAccessor's edit method, call notify manually
        notify(ObjectModifiedEvent(acc.context))
        # Sequence-valued properties come back as tuples.
        self.assertEqual(acc.recurrence, u'RRULE:FREQ=DAILY;COUNT=5')
        self.assertEqual(acc.location, u'Home')
        self.assertEqual(acc.attendees, (u'me', u'you'))
        self.assertEqual(acc.contact_name, u"Max Mustermann")
        self.assertEqual(acc.contact_email, u'test@test.com')
        self.assertEqual(acc.contact_phone, u"+1234567890")
        self.assertEqual(acc.event_url, u"http://plone.org/")
        self.assertEqual(acc.subjects, (u"tag1", u"tag2"))
        self.assertEqual(acc.text, u"body text with <b>html</b> formating.")
class PAEventATTest(unittest.TestCase):
    """Integration tests for the Archetypes Event content type itself.

    NOTE(review): this module targets Python 2 (unittest2, cmp()); keep
    that in mind before modernizing.
    """
    layer = PAEventAT_INTEGRATION_TESTING
    def setUp(self):
        """Create a fresh 'event1' and pin the process timezone."""
        portal = self.layer['portal']
        self.portal = portal
        setRoles(portal, TEST_USER_ID, ['Manager'])
        portal.invokeFactory(type_name='Event', id='event1', title='Event1')
        self.obj = portal['event1']
        set_env_timezone(TZNAME)
    def _edit_atevent(self, obj):
        """Populate *obj* with the OBJ_DATA fixture values and fire the
        modified event so subscribers run."""
        dcEdit(obj)
        obj.setLocation(OBJ_DATA['location'])
        obj.setSubject(OBJ_DATA['subject'])
        obj.setEventUrl(OBJ_DATA['eventUrl'])
        obj.setStartDate(OBJ_DATA['startDate'])
        obj.setEndDate(OBJ_DATA['endDate'])
        obj.setContactName(OBJ_DATA['contactName'])
        obj.setContactPhone(OBJ_DATA['contactPhone'])
        obj.setContactEmail(OBJ_DATA['contactEmail'])
        obj.setAttendees(OBJ_DATA['attendees'])
        obj.setText(OBJ_DATA['text'])
        obj.setTimezone(OBJ_DATA['timezone'])
        notify(ObjectModifiedEvent(obj))
    def test_implementsInterfaces(self):
        """Test if an ATEvent object implements all relevant interfaces.
        """
        self.assertTrue(IEvent.providedBy(self.obj))
        self.assertTrue(IEventRecurrence.providedBy(self.obj))
        self.assertTrue(IATEvent.providedBy(self.obj))
        self.assertTrue(IATEventRecurrence.providedBy(self.obj))
        self.assertTrue(IATEvent_ATCT.providedBy(self.obj))
        self.assertTrue(verifyObject(IATEvent_ATCT, self.obj))
    def test_validation(self):
        """validate() must flag unparseable dates and start>end, except
        when openEnd is set."""
        req = FakeRequest()
        # Also return any given errors
        req.form.update({'startDate': '2010-10-30'})
        err = {'endDate': None}
        errors = self.obj.validate(req, err)
        self.assertEqual(errors, err)
        # Bad input
        req.form.update({'startDate': '2x10-10-30'})
        req.form.update({'endDate': 'bla'})
        errors = self.obj.validate(req, errors=None)
        self.assertTrue('startDate' in errors)
        self.assertTrue('endDate' in errors)
        # Start date must be before end date
        req.form.update({'startDate': '2010-10-30', 'endDate': '2010-10-01'})
        errors = self.obj.validate(req, errors=None)
        self.assertTrue('endDate' in errors)
        # ... except if open_end=True
        req.form.update({'startDate': '2010-10-30', 'endDate': '2010-10-01',
                         'openEnd': True})
        errors = self.obj.validate(req, errors=None)
        self.assertEqual(errors, {})
    def test_edit(self):
        """Editing via the mutators keeps start/end/duration consistent."""
        new = self.obj
        self._edit_atevent(new)
        self.assertEqual(new.start_date, pydt(new.start()))
        self.assertEqual(new.end_date, pydt(new.end()))
        # TODO: sometimes the following tests fails, because of comparison of
        # microseconds. Is it an rounding error?
        # AssertionError: datetime.datetime(2012, 10, 25, 14, 2, 11, 855640,
        # tzinfo=<DstTzInfo 'Europe/Vienna' CEST+2:00:00 DST>) !=
        # datetime.datetime(2012, 10, 25, 14, 2, 11, 85564, tzinfo=<DstTzInfo
        # 'Europe/Vienna' CEST+2:00:00 DST>)
        self.assertEqual(new.start_date, pydt(OBJ_DATA['startDate']))
        self.assertEqual(new.end_date, pydt(OBJ_DATA['endDate']))
        self.assertEqual(new.duration, new.end_date - new.start_date)
    def test_sane_start_end(self):
        """An event's start must never be after its end."""
        self.assertTrue(self.obj.start() <= self.obj.end())
    def test_cmp(self):
        """Events order by start date, then duration, then title.
        NOTE(review): cmp() exists only under Python 2.
        """
        portal = self.portal
        e1 = self.obj
        portal.invokeFactory(type_name='Event', id='event2', title='Event 2')
        e2 = portal['event2']
        day29 = DateTime('2004-12-29 0:00:00')
        day30 = DateTime('2004-12-30 0:00:00')
        day31 = DateTime('2004-12-31 0:00:00')
        e1.edit(startDate=day29, endDate=day30, title='event')
        e2.edit(startDate=day29, endDate=day30, title='event')
        self.assertEqual(cmp(e1, e2), 0)
        # start date
        e1.edit(startDate=day29, endDate=day30, title='event')
        e2.edit(startDate=day30, endDate=day31, title='event')
        self.assertEqual(cmp(e1, e2), -1)  # e1 < e2
        # duration
        e1.edit(startDate=day29, endDate=day30, title='event')
        e2.edit(startDate=day29, endDate=day31, title='event')
        self.assertEqual(cmp(e1, e2), -1)  # e1 < e2
        # title
        e1.edit(startDate=day29, endDate=day30, title='event')
        e2.edit(startDate=day29, endDate=day30, title='evenz')
        self.assertEqual(cmp(e1, e2), -1)  # e1 < e2
    def test_get_size(self):
        """get_size() reports the length of the body text."""
        event = self.obj
        self._edit_atevent(event)
        self.assertEqual(event.get_size(), len(OBJ_DATA['text']))
    def test_data_postprocessing(self):
        """Timezone changes must re-zone stored dates without converting
        the wall-clock values (bug #62)."""
        # TODO: since we use an IEventAccessor here, this is a possible
        # candidate for merging with
        # the test_dxevent.TestDXIntegration.test_data_postprocessing test.
        # Addressing bug #62
        self.portal.invokeFactory(
            'Event',
            'ate1',
            startDate=DateTime(2012, 10, 19, 0, 30),
            endDate=DateTime(2012, 10, 19, 1, 30),
            timezone="Europe/Vienna",
            wholeDay=False
        )
        e1 = self.portal['ate1']
        e1.reindexObject()
        acc = IEventAccessor(e1)
        # Prepare reference objects
        tzname_1 = "Europe/Vienna"
        tz_1 = pytz.timezone(tzname_1)
        dt_1 = tz_1.localize(datetime(2012, 10, 19, 0, 30))
        dt_1_1 = tz_1.localize(datetime(2012, 10, 19, 0, 0))
        dt_1_2 = tz_1.localize(datetime(2012, 10, 19, 23, 59, 59))
        tzname_2 = "Hongkong"
        tz_2 = pytz.timezone(tzname_2)
        dt_2 = tz_2.localize(datetime(2012, 10, 19, 0, 30))
        dt_2_1 = tz_2.localize(datetime(2012, 10, 19, 0, 0))
        dt_2_2 = tz_2.localize(datetime(2012, 10, 19, 23, 59, 59))
        # See, if start isn't moved by timezone offset.
        self.assertTrue(acc.start == dt_1)
        notify(ObjectModifiedEvent(e1))
        self.assertTrue(acc.start == dt_1)
        # After timezone changes, only the timezone should be applied, but the
        # date and time values not converted.
        acc.timezone = tzname_2
        notify(ObjectModifiedEvent(e1))
        self.assertTrue(acc.start == dt_2)
        # Likewise with wholeDay events. If values were converted, the days
        # would drift apart.
        acc.whole_day = True
        acc.timezone = tzname_1
        notify(ObjectModifiedEvent(e1))
        self.assertTrue(acc.start == dt_1_1)
        self.assertTrue(acc.end == dt_1_2)
        acc.timezone = tzname_2
        notify(ObjectModifiedEvent(e1))
        self.assertTrue(acc.start == dt_2_1)
        self.assertTrue(acc.end == dt_2_2)
class PAEventCMFEditTest(unittest.TestCase):
    """Create and edit Events through the CMF-level convenience APIs."""
    layer = PAEventAT_INTEGRATION_TESTING
    def setUp(self):
        site = self.layer['portal']
        self.portal = site
        setRoles(site, TEST_USER_ID, ['Manager'])
        set_env_timezone(TZNAME)
    def testEventCreate(self):
        """invokeFactory with string dates must populate title/start/end."""
        self.portal.invokeFactory('Event', id='event',
                                  title='Foo',
                                  start_date='2003-09-18',
                                  end_date='2003-09-19')
        event = self.portal.event
        self.assertEqual(event.Title(), 'Foo')
        self.assertTrue(
            event.start().ISO8601().startswith('2003-09-18T00:00:00'))
        self.assertTrue(
            event.end().ISO8601().startswith('2003-09-19T00:00:00'))
    def testEventEdit(self):
        """The legacy event_edit script (pre-Plone-5) must update the event."""
        self.portal.invokeFactory('Event', id='event')
        editor = getattr(self.portal.event, 'event_edit', None)
        if not editor:
            # event_edit script is removed in Plone 5
            return
        editor(title='Foo',
               start_date='2003-09-18',
               end_date='2003-09-19')
        event = self.portal.event
        self.assertEqual(event.Title(), 'Foo')
        self.assertTrue(
            event.start().ISO8601().startswith('2003-09-18T00:00:00'))
        self.assertTrue(
            event.end().ISO8601().startswith('2003-09-19T00:00:00'))
class PAEventATFieldTest(unittest.TestCase):
layer = PAEventAT_INTEGRATION_TESTING
def setUp(self):
portal = self.layer['portal']
self.portal = portal
setRoles(portal, TEST_USER_ID, ['Manager'])
portal.invokeFactory(type_name='Event', id='event1', title='Event1')
self.obj = portal['event1']
def test_attendeesField(self):
field = self.obj.getField('attendees')
self.assertTrue(ILayerContainer.providedBy(field))
self.assertTrue(field.required == 0, 'Value is %s' % field.required)
self.assertTrue(field.default == (),
'Value is %s' % str(field.default))
self.assertTrue(field.searchable == 1,
'Value is %s' % field.searchable)
self.assertTrue(field.vocabulary == (),
'Value is %s' % str(field.vocabulary))
self.assertTrue(field.enforceVocabulary == 0,
'Value is %s' % field.enforceVocabulary)
self.assertTrue(field.multiValued == 0,
'Value is %s' % field.multiValued)
self.assertTrue(field.isMetadata == 0,
'Value is %s' % field.isMetadata)
self.assertTrue(field.accessor == 'getAttendees',
'Value is %s' % field.accessor)
self.assertTrue(field.mutator == 'setAttendees',
'Value is %s' % field.mutator)
self.assertTrue(field.read_permission == View,
'Value is %s' % field.read_permission)
self.assertTrue(field.write_permission ==
ModifyPortalContent,
'Value is %s' % field.write_permission)
self.assertTrue(field.generateMode == 'veVc',
'Value is %s' % field.generateMode)
self.assertTrue(field.force == '', 'Value is %s' % field.force)
self.assertTrue(field.type == 'lines', 'Value is %s' % field.type)
self.assertTrue(isinstance(field.storage, atapi.AttributeStorage),
'Value is %s' % type(field.storage))
self.assertTrue(
field.getLayerImpl('storage') == atapi.AttributeStorage(),
'Value is %s' % field.getLayerImpl('storage')
)
self.assertTrue(isinstance(field.widget, atapi.LinesWidget),
'Value is %s' % id(field.widget))
vocab = field.Vocabulary(self.obj)
self.assertTrue(isinstance(vocab, atapi.DisplayList),
'Value is %s' % type(vocab))
self.assertTrue(tuple(vocab) == (), 'Value is %s' % str(tuple(vocab)))
def test_contactEmailField(self):
field = self.obj.getField('contactEmail')
self.assertTrue(ILayerContainer.providedBy(field))
self.assertTrue(field.required == 0,
'Value is %s' % field.required)
self.assertTrue(field.default == '',
'Value is %s' % str(field.default))
self.assertTrue(field.searchable == 1,
'Value is %s' % field.searchable)
self.assertTrue(field.vocabulary == (),
'Value is %s' % str(field.vocabulary))
self.assertTrue(field.enforceVocabulary == 0,
'Value is %s' % field.enforceVocabulary)
self.assertTrue(field.multiValued == 0,
'Value is %s' % field.multiValued)
self.assertTrue(field.isMetadata == 0,
'Value is %s' % field.isMetadata)
self.assertTrue(field.accessor == 'contact_email',
'Value is %s' % field.accessor)
self.assertTrue(field.mutator == 'setContactEmail',
'Value is %s' % field.mutator)
self.assertTrue(field.read_permission == View,
'Value is %s' % field.read_permission)
self.assertTrue(field.write_permission ==
ModifyPortalContent,
'Value is %s' % field.write_permission)
self.assertTrue(field.generateMode == 'veVc',
'Value is %s' % field.generateMode)
self.assertTrue(field.force == '', 'Value is %s' % field.force)
self.assertTrue(field.type == 'string', 'Value is %s' % field.type)
self.assertTrue(isinstance(field.storage, atapi.AttributeStorage),
'Value is %s' % type(field.storage))
self.assertTrue(
field.getLayerImpl('storage') == atapi.AttributeStorage(),
'Value is %s' % field.getLayerImpl('storage')
)
self.assertTrue(field.validators == EmailValidator,
'Value is %s' % str(field.validators))
self.assertTrue(isinstance(field.widget, atapi.StringWidget),
'Value is %s' % id(field.widget))
vocab = field.Vocabulary(self.obj)
self.assertTrue(isinstance(vocab, atapi.DisplayList),
'Value is %s' % type(vocab))
self.assertTrue(tuple(vocab) == (), 'Value is %s' % str(tuple(vocab)))
def test_contactNameField(self):
field = self.obj.getField('contactName')
self.assertTrue(ILayerContainer.providedBy(field))
self.assertTrue(field.required == 0, 'Value is %s' % field.required)
self.assertTrue(field.default == '',
'Value is %s' % str(field.default))
self.assertTrue(field.searchable == 1,
'Value is %s' % field.searchable)
self.assertTrue(field.vocabulary == (),
'Value is %s' % str(field.vocabulary))
self.assertTrue(field.enforceVocabulary == 0,
'Value is %s' % field.enforceVocabulary)
self.assertTrue(field.multiValued == 0,
'Value is %s' % field.multiValued)
self.assertTrue(field.isMetadata == 0,
'Value is %s' % field.isMetadata)
self.assertTrue(field.accessor == 'contact_name',
'Value is %s' % field.accessor)
self.assertTrue(field.mutator == 'setContactName',
'Value is %s' % field.mutator)
self.assertTrue(field.read_permission == View,
'Value is %s' % field.read_permission)
self.assertTrue(field.write_permission ==
ModifyPortalContent,
'Value is %s' % field.write_permission)
self.assertTrue(field.generateMode == 'veVc',
'Value is %s' % field.generateMode)
self.assertTrue(field.force == '', 'Value is %s' % field.force)
self.assertTrue(field.type == 'string', 'Value is %s' % field.type)
self.assertTrue(isinstance(field.storage, atapi.AttributeStorage),
'Value is %s' % type(field.storage))
self.assertTrue(
field.getLayerImpl('storage') == atapi.AttributeStorage(),
'Value is %s' % field.getLayerImpl('storage')
)
self.assertTrue(field.validators == EmptyValidator,
'Value is %s' % str(field.validators))
self.assertTrue(isinstance(field.widget, atapi.StringWidget),
'Value is %s' % id(field.widget))
vocab = field.Vocabulary(self.obj)
self.assertTrue(isinstance(vocab, atapi.DisplayList),
'Value is %s' % type(vocab))
self.assertTrue(tuple(vocab) == (), 'Value is %s' % str(tuple(vocab)))
def test_contactPhoneField(self):
field = self.obj.getField('contactPhone')
self.assertTrue(ILayerContainer.providedBy(field))
self.assertTrue(field.required == 0, 'Value is %s' % field.required)
self.assertTrue(field.default == '',
'Value is %s' % str(field.default))
self.assertTrue(field.searchable == 1,
'Value is %s' % field.searchable)
self.assertTrue(field.vocabulary == (),
'Value is %s' % str(field.vocabulary))
self.assertTrue(field.enforceVocabulary == 0,
'Value is %s' % field.enforceVocabulary)
self.assertTrue(field.multiValued == 0,
'Value is %s' % field.multiValued)
self.assertTrue(field.isMetadata == 0,
'Value is %s' % field.isMetadata)
self.assertTrue(field.accessor == 'contact_phone',
'Value is %s' % field.accessor)
self.assertTrue(field.mutator == 'setContactPhone',
'Value is %s' % field.mutator)
self.assertTrue(field.read_permission == View,
'Value is %s' % field.read_permission)
self.assertTrue(field.write_permission ==
ModifyPortalContent,
'Value is %s' % field.write_permission)
self.assertTrue(field.generateMode == 'veVc',
'Value is %s' % field.generateMode)
self.assertTrue(field.force == '', 'Value is %s' % field.force)
self.assertTrue(field.type == 'string', 'Value is %s' % field.type)
self.assertTrue(isinstance(field.storage, atapi.AttributeStorage),
'Value is %s' % type(field.storage))
self.assertTrue(
field.getLayerImpl('storage') == atapi.AttributeStorage(),
'Value is %s' % field.getLayerImpl('storage')
)
self.assertEqual(field.validators, EmptyValidator)
self.assertTrue(isinstance(field.widget, atapi.StringWidget),
'Value is %s' % id(field.widget))
vocab = field.Vocabulary(self.obj)
self.assertTrue(isinstance(vocab, atapi.DisplayList),
'Value is %s' % type(vocab))
self.assertTrue(tuple(vocab) == (), 'Value is %s' % str(tuple(vocab)))
def test_endDateField(self):
field = self.obj.getField('endDate')
self.assertTrue(ILayerContainer.providedBy(field))
self.assertTrue(field.required == 1, 'Value is %s' % field.required)
self.assertTrue(field.default is None,
'Value is %s' % str(field.default))
self.assertTrue(field.default_method == default_end,
'Value is %s' % str(field.default_method))
self.assertTrue(field.searchable is False,
'Value is %s' % field.searchable)
self.assertTrue(field.vocabulary == (),
'Value is %s' % str(field.vocabulary))
self.assertTrue(field.enforceVocabulary == 0,
'Value is %s' % field.enforceVocabulary)
self.assertTrue(field.multiValued == 0,
'Value is %s' % field.multiValued)
self.assertTrue(field.isMetadata == 0,
'Value is %s' % field.isMetadata)
self.assertTrue(field.accessor == 'end',
'Value is %s' % field.accessor)
self.assertTrue(field.mutator == 'setEndDate',
'Value is %s' % field.mutator)
self.assertTrue(field.read_permission == View,
'Value is %s' % field.read_permission)
self.assertTrue(field.write_permission ==
ModifyPortalContent,
'Value is %s' % field.write_permission)
self.assertTrue(field.generateMode == 'veVc',
'Value is %s' % field.generateMode)
self.assertTrue(field.force == '', 'Value is %s' % field.force)
self.assertTrue(field.type == 'datetime', 'Value is %s' % field.type)
self.assertTrue(isinstance(field.storage, atapi.AttributeStorage),
'Value is %s' % type(field.storage))
self.assertTrue(
field.getLayerImpl('storage') == atapi.AttributeStorage(),
'Value is %s' % field.getLayerImpl('storage')
)
self.assertTrue(field.validators == (),
'Value is %s' % str(field.validators))
self.assertTrue(isinstance(field.widget, DatetimeWidget),
'Value is %s' % id(field.widget))
vocab = field.Vocabulary(self.obj)
self.assertTrue(isinstance(vocab, atapi.DisplayList),
'Value is %s' % type(vocab))
self.assertTrue(tuple(vocab) == (), 'Value is %s' % str(tuple(vocab)))
def test_eventUrlField(self):
field = self.obj.getField('eventUrl')
self.assertTrue(ILayerContainer.providedBy(field))
self.assertTrue(field.required == 0, 'Value is %s' % field.required)
self.assertTrue(field.default == '',
'Value is %s' % str(field.default))
self.assertTrue(field.searchable == 1,
'Value is %s' % field.searchable)
self.assertTrue(field.vocabulary == (),
'Value is %s' % str(field.vocabulary))
self.assertTrue(field.enforceVocabulary == 0,
'Value is %s' % field.enforceVocabulary)
self.assertTrue(field.multiValued == 0,
'Value is %s' % field.multiValued)
self.assertTrue(field.isMetadata == 0,
'Value is %s' % field.isMetadata)
self.assertTrue(field.accessor == 'event_url',
'Value is %s' % field.accessor)
self.assertTrue(field.mutator == 'setEventUrl',
'Value is %s' % field.mutator)
self.assertTrue(field.read_permission == View,
'Value is %s' % field.read_permission)
self.assertTrue(field.write_permission ==
ModifyPortalContent,
'Value is %s' % field.write_permission)
self.assertTrue(field.generateMode == 'veVc',
'Value is %s' % field.generateMode)
self.assertTrue(field.force == '', 'Value is %s' % field.force)
self.assertTrue(field.type == 'string', 'Value is %s' % field.type)
self.assertTrue(isinstance(field.storage, atapi.AttributeStorage),
'Value is %s' % type(field.storage))
self.assertTrue(
field.getLayerImpl('storage') == atapi.AttributeStorage(),
'Value is %s' % field.getLayerImpl('storage')
)
self.assertEqual(field.validators, URLValidator)
self.assertTrue(isinstance(field.widget, atapi.StringWidget),
'Value is %s' % id(field.widget))
vocab = field.Vocabulary(self.obj)
self.assertTrue(isinstance(vocab, atapi.DisplayList),
'Value is %s' % type(vocab))
self.assertTrue(tuple(vocab) == (), 'Value is %s' % str(tuple(vocab)))
def test_locationField(self):
field = self.obj.getField('location')
self.assertTrue(ILayerContainer.providedBy(field))
self.assertTrue(field.required == 0, 'Value is %s' % field.required)
self.assertTrue(field.default == '',
'Value is %s' % str(field.default))
self.assertTrue(field.searchable == 1,
'Value is %s' % field.searchable)
self.assertTrue(field.vocabulary == (),
'Value is %s' % str(field.vocabulary))
self.assertTrue(field.enforceVocabulary == 0,
'Value is %s' % field.enforceVocabulary)
self.assertTrue(field.multiValued == 0,
'Value is %s' % field.multiValued)
self.assertTrue(field.isMetadata == 0,
'Value is %s' % field.isMetadata)
self.assertTrue(field.accessor == 'getLocation',
'Value is %s' % field.accessor)
self.assertTrue(field.mutator == 'setLocation',
'Value is %s' % field.mutator)
self.assertTrue(field.read_permission == View,
'Value is %s' % field.read_permission)
self.assertTrue(field.write_permission == ModifyPortalContent,
'Value is %s' % field.write_permission)
self.assertTrue(field.generateMode == 'veVc',
'Value is %s' % field.generateMode)
self.assertTrue(field.force == '', 'Value is %s' % field.force)
self.assertTrue(field.type == 'string', 'Value is %s' % field.type)
self.assertTrue(isinstance(field.storage, atapi.AttributeStorage),
'Value is %s' % type(field.storage))
self.assertTrue(
field.getLayerImpl('storage') == atapi.AttributeStorage(),
'Value is %s' % field.getLayerImpl('storage')
)
self.assertTrue(field.validators == EmptyValidator,
'Value is %s' % str(field.validators))
self.assertTrue(isinstance(field.widget, atapi.StringWidget),
'Value is %s' % id(field.widget))
vocab = field.Vocabulary(self.obj)
self.assertTrue(isinstance(vocab, atapi.DisplayList),
'Value is %s' % type(vocab))
self.assertTrue(tuple(vocab) == (), 'Value is %s' % str(tuple(vocab)))
def test_recurrenceField(self):
field = self.obj.getField('recurrence')
self.assertTrue(ILayerContainer.providedBy(field))
self.assertTrue(field.required == 0, 'Value is %s' % field.required)
self.assertTrue(field.default == '',
'Value is %s' % str(field.default))
self.assertTrue(field.searchable is False,
'Value is %s' % field.searchable)
self.assertTrue(field.vocabulary == (),
'Value is %s' % str(field.vocabulary))
self.assertTrue(field.enforceVocabulary == 0,
'Value is %s' % field.enforceVocabulary)
self.assertTrue(field.multiValued == 0,
'Value is %s' % field.multiValued)
self.assertTrue(field.isMetadata == 0,
'Value is %s' % field.isMetadata)
self.assertTrue(field.accessor == 'getRecurrence',
'Value is %s' % field.accessor)
self.assertTrue(field.mutator == 'setRecurrence',
'Value is %s' % field.mutator)
self.assertTrue(field.read_permission == View,
'Value is %s' % field.read_permission)
self.assertTrue(field.write_permission ==
ModifyPortalContent,
'Value is %s' % field.write_permission)
self.assertTrue(field.generateMode == 'veVc',
'Value is %s' % field.generateMode)
self.assertTrue(field.force == '', 'Value is %s' % field.force)
self.assertTrue(field.type == 'string', 'Value is %s' % field.type)
self.assertTrue(isinstance(field.storage, atapi.AttributeStorage),
'Value is %s' % type(field.storage))
self.assertTrue(
field.getLayerImpl('storage') == atapi.AttributeStorage(),
'Value is %s' % field.getLayerImpl('storage')
)
# flatten nested tuples
valis = list(itertools.chain(*field.validators))
is_recval = False
for vali in valis:
is_recval = is_recval or isinstance(vali, RecurrenceValidator)
self.assertTrue(is_recval)
self.assertTrue(isinstance(field.widget, RecurrenceWidget),
'Value is %s' % id(field.widget))
vocab = field.Vocabulary(self.obj)
self.assertTrue(isinstance(vocab, atapi.DisplayList),
'Value is %s' % type(vocab))
self.assertTrue(tuple(vocab) == (), 'Value is %s' % str(tuple(vocab)))
def test_startDateField(self):
field = self.obj.getField('startDate')
self.assertTrue(ILayerContainer.providedBy(field))
self.assertTrue(field.required == 1, 'Value is %s' % field.required)
self.assertTrue(field.default is None,
'Value is %s' % str(field.default))
self.assertTrue(field.default_method == default_start,
'Value is %s' % str(field.default_method))
self.assertTrue(field.searchable is False,
'Value is %s' % field.searchable)
self.assertTrue(field.vocabulary == (),
'Value is %s' % str(field.vocabulary))
self.assertTrue(field.enforceVocabulary == 0,
'Value is %s' % field.enforceVocabulary)
self.assertTrue(field.multiValued == 0,
'Value is %s' % field.multiValued)
self.assertTrue(field.isMetadata == 0,
'Value is %s' % field.isMetadata)
self.assertTrue(field.accessor == 'start',
'Value is %s' % field.accessor)
self.assertTrue(field.mutator == 'setStartDate',
'Value is %s' % field.mutator)
self.assertTrue(field.read_permission == View,
'Value is %s' % field.read_permission)
self.assertTrue(field.write_permission ==
ModifyPortalContent,
'Value is %s' % field.write_permission)
self.assertTrue(field.generateMode == 'veVc',
'Value is %s' % field.generateMode)
self.assertTrue(field.force == '', 'Value is %s' % field.force)
self.assertTrue(field.type == 'datetime', 'Value is %s' % field.type)
self.assertTrue(isinstance(field.storage, atapi.AttributeStorage),
'Value is %s' % type(field.storage))
self.assertTrue(
field.getLayerImpl('storage') == atapi.AttributeStorage(),
'Value is %s' % field.getLayerImpl('storage')
)
self.assertTrue(field.validators == (),
'Value is %s' % str(field.validators))
self.assertTrue(isinstance(field.widget, DatetimeWidget),
'Value is %s' % id(field.widget))
vocab = field.Vocabulary(self.obj)
self.assertTrue(isinstance(vocab, atapi.DisplayList),
'Value is %s' % type(vocab))
self.assertTrue(tuple(vocab) == (), 'Value is %s' % str(tuple(vocab)))
def test_subjectField(self):
    """'subject' is a searchable, multi-valued 'lines' metadata field in MetadataStorage.

    Rewritten with specific unittest assert methods for informative failures.
    """
    field = self.obj.getField('subject')
    self.assertTrue(ILayerContainer.providedBy(field))
    self.assertEqual(field.required, 0)
    self.assertEqual(field.default, ())
    self.assertEqual(field.searchable, 1)
    self.assertEqual(field.enforceVocabulary, 0)
    self.assertEqual(field.multiValued, 1)
    self.assertEqual(field.isMetadata, 1)
    self.assertEqual(field.accessor, 'Subject')
    self.assertEqual(field.mutator, 'setSubject')
    self.assertEqual(field.read_permission, View)
    self.assertEqual(field.write_permission, ModifyPortalContent)
    self.assertEqual(field.generateMode, 'mVc')
    self.assertEqual(field.force, '')
    self.assertEqual(field.type, 'lines')
    self.assertIsInstance(field.storage, atapi.MetadataStorage)
    self.assertEqual(field.getLayerImpl('storage'), atapi.MetadataStorage())
    self.assertEqual(field.validators, EmptyValidator)
    self.assertIsInstance(field.widget, atapi.KeywordWidget)
def test_textField(self):
    """'text' is the primary rich-text field: AnnotationStorage, RichWidget, x-html-safe output.

    Rewritten with specific unittest assert methods for informative failures.
    """
    field = self.obj.getField('text')
    self.assertTrue(ILayerContainer.providedBy(field))
    self.assertEqual(field.required, 0)
    self.assertEqual(field.default, '')
    self.assertEqual(field.searchable, 1)
    self.assertEqual(field.vocabulary, ())
    self.assertEqual(field.enforceVocabulary, 0)
    self.assertEqual(field.multiValued, 0)
    self.assertEqual(field.isMetadata, 0)
    self.assertEqual(field.accessor, 'getText')
    self.assertEqual(field.mutator, 'setText')
    self.assertEqual(field.read_permission, View)
    self.assertEqual(field.write_permission, ModifyPortalContent)
    self.assertEqual(field.generateMode, 'veVc')
    self.assertEqual(field.force, '')
    self.assertEqual(field.type, 'text')
    self.assertIsInstance(field.storage, atapi.AnnotationStorage)
    self.assertEqual(
        field.getLayerImpl('storage'),
        atapi.AnnotationStorage(migrate=True)
    )
    self.assertEqual(field.validators, NotRequiredTidyHTMLValidator)
    self.assertIsInstance(field.widget, atapi.RichWidget)
    vocab = field.Vocabulary(self.obj)
    self.assertIsInstance(vocab, atapi.DisplayList)
    self.assertEqual(tuple(vocab), ())
    self.assertEqual(field.primary, 1)
    self.assertIsNone(field.default_content_type)
    self.assertEqual(field.default_output_type, 'text/x-html-safe')
    self.assertIn('text/html', field.getAllowedContentTypes(self.obj))
def test_timezoneField(self):
    """'timezone' is a required string chosen from the AvailableTimezones vocabulary.

    Rewritten with specific unittest assert methods for informative failures.
    """
    field = self.obj.getField('timezone')
    self.assertTrue(ILayerContainer.providedBy(field))
    self.assertEqual(field.required, 1)
    self.assertEqual(field.default, '')
    self.assertEqual(field.default_method, default_timezone)
    self.assertEqual(field.searchable, 0)
    self.assertEqual(field.vocabulary, ())
    self.assertEqual(field.vocabulary_factory, u'plone.app.event.AvailableTimezones')
    self.assertIs(field.enforceVocabulary, True)
    self.assertEqual(field.multiValued, 0)
    self.assertEqual(field.isMetadata, 0)
    self.assertEqual(field.accessor, 'getTimezone')
    self.assertEqual(field.mutator, 'setTimezone')
    self.assertEqual(field.read_permission, View)
    self.assertEqual(field.write_permission, ModifyPortalContent)
    self.assertEqual(field.generateMode, 'veVc')
    self.assertEqual(field.force, '')
    self.assertEqual(field.type, 'string')
    self.assertIsInstance(field.storage, atapi.AttributeStorage)
    self.assertEqual(field.getLayerImpl('storage'), atapi.AttributeStorage())
    self.assertEqual(field.validators, ())
    self.assertIsInstance(field.widget, atapi.SelectionWidget)
    vocab = field.Vocabulary(self.obj)
    self.assertIsInstance(vocab, atapi.DisplayList)
    self.assertIn('Africa/Abidjan', tuple(vocab))
def test_wholeDayField(self):
    """'wholeDay' is an optional boolean with a Yes/No vocabulary and BooleanWidget.

    Rewritten with specific unittest assert methods for informative failures.
    """
    field = self.obj.getField('wholeDay')
    self.assertTrue(ILayerContainer.providedBy(field))
    self.assertEqual(field.required, 0)
    self.assertIs(field.default, False)
    self.assertEqual(field.searchable, 0)
    self.assertEqual(
        field.vocabulary,
        (('True', 'Yes', 'yes'), ('False', 'No', 'no'))
    )
    self.assertEqual(field.enforceVocabulary, 0)
    self.assertEqual(field.multiValued, 0)
    self.assertEqual(field.isMetadata, 0)
    self.assertEqual(field.accessor, 'getWholeDay')
    self.assertEqual(field.mutator, 'setWholeDay')
    self.assertEqual(field.read_permission, View)
    self.assertEqual(field.write_permission, ModifyPortalContent)
    self.assertEqual(field.generateMode, 'veVc')
    self.assertEqual(field.force, '')
    self.assertEqual(field.type, 'boolean')
    self.assertIsInstance(field.storage, atapi.AttributeStorage)
    self.assertEqual(field.getLayerImpl('storage'), atapi.AttributeStorage())
    self.assertEqual(field.validators, EmptyValidator)
    self.assertIsInstance(field.widget, atapi.BooleanWidget)
    vocab = field.Vocabulary(self.obj)
    self.assertIsInstance(vocab, atapi.DisplayList)
    self.assertEqual(tuple(vocab), ('True', 'False'))
def test_openEndField(self):
    """'openEnd' is an optional boolean with a Yes/No vocabulary and BooleanWidget.

    Rewritten with specific unittest assert methods for informative failures.
    """
    field = self.obj.getField('openEnd')
    self.assertTrue(ILayerContainer.providedBy(field))
    self.assertEqual(field.required, 0)
    self.assertIs(field.default, False)
    self.assertEqual(field.searchable, 0)
    self.assertEqual(
        field.vocabulary,
        (('True', 'Yes', 'yes'), ('False', 'No', 'no'))
    )
    self.assertEqual(field.enforceVocabulary, 0)
    self.assertEqual(field.multiValued, 0)
    self.assertEqual(field.isMetadata, 0)
    self.assertEqual(field.accessor, 'getOpenEnd')
    self.assertEqual(field.mutator, 'setOpenEnd')
    self.assertEqual(field.read_permission, View)
    self.assertEqual(field.write_permission, ModifyPortalContent)
    self.assertEqual(field.generateMode, 'veVc')
    self.assertEqual(field.force, '')
    self.assertEqual(field.type, 'boolean')
    self.assertIsInstance(field.storage, atapi.AttributeStorage)
    self.assertEqual(field.getLayerImpl('storage'), atapi.AttributeStorage())
    self.assertEqual(field.validators, EmptyValidator)
    self.assertIsInstance(field.widget, atapi.BooleanWidget)
    vocab = field.Vocabulary(self.obj)
    self.assertIsInstance(vocab, atapi.DisplayList)
    self.assertEqual(tuple(vocab), ('True', 'False'))
def test_openEnd_handler(self):
    """An open-ended event keeps its start but is clamped to end at 23:59:59 that day."""
    event_id = self.portal.invokeFactory(
        'Event',
        id="event",
        startDate='2000/10/12 06:00:00',
        endDate='2000/10/14 18:00:00',
        timezone=TZNAME,
        openEnd=True
    )
    created = self.portal[event_id]
    self.assertTrue(created.getOpenEnd())
    self.assertEqual(created.start().Time(), '06:00:00')
    # despite the requested end date two days later, the end is forced to
    # the start day's last second
    self.assertEqual(created.end().Date(), '2000/10/12')
    self.assertEqual(created.end().Time(), '23:59:59')
def test_wholeday_handler(self):
    """A whole-day event is stretched to span 00:00:00 through 23:59:59."""
    event_id = self.portal.invokeFactory(
        'Event',
        id="event",
        startDate='2000/10/12 06:00:00',
        endDate='2000/10/13 18:00:00',
        timezone=TZNAME,
        wholeDay=True
    )
    created = self.portal[event_id]
    self.assertTrue(created.getWholeDay())
    self.assertEqual(created.start().Time(), '00:00:00')
    self.assertEqual(created.end().Time(), '23:59:59')
def test_wholeday_handler_notwholeday(self):
    """Without the wholeDay flag, the given start/end times are kept verbatim."""
    event_id = self.portal.invokeFactory(
        'Event',
        id="event",
        startDate='2000/10/12 06:00:00',
        endDate='2000/10/13 18:00:00',
        timezone=TZNAME
    )
    created = self.portal[event_id]
    self.assertFalse(created.getWholeDay())
    self.assertEqual(created.start().Time(), '06:00:00')
    self.assertEqual(created.end().Time(), '18:00:00')
def test_timezone_handler(self):
    """The requested timezone is applied to both start and end dates."""
    event_id = self.portal.invokeFactory(
        'Event',
        id="event",
        startDate='2000/10/12 06:00:00',
        endDate='2000/10/13 18:00:00',
        timezone=TZNAME
    )
    created = self.portal[event_id]
    self.assertEqual(created.start().Time(), '06:00:00')
    self.assertEqual(created.end().Time(), '18:00:00')
    # both the DateTime accessors and the python datetime attributes
    # must carry the requested zone
    self.assertEqual(created.start().timezone(), TZNAME)
    self.assertEqual(created.end().timezone(), TZNAME)
    self.assertEqual(created.start_date.tzinfo.zone, TZNAME)
    self.assertEqual(created.end_date.tzinfo.zone, TZNAME)
|
10,123 | c00873814c110f571396973e99ffc953b2e21788 | dict = {'naam': } |
10,124 | 59e446d26c5becfe8e7d4486e204d85a0b807dab | __author__ = 'Gina Pappagallo (gmp5vb)'
def greetings(msg):
    """Print `msg` to stdout.

    `print` already stringifies its argument, so the original's explicit
    str() call was redundant; output is unchanged for every input.
    """
    print(msg)
10,125 | 4cc15ca62803def398ce31181c0a73184d8b28f6 | """
:Copyright: 2014-2023 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
import pytest
from byceps.services.tourney import (
tourney_match_comment_service,
tourney_match_service,
)
def test_hide_comment(api_client, api_client_authz_header, admin_user, comment):
    """POSTing the 'hidden' flag hides the comment and records who hid it."""
    before = tourney_match_comment_service.get_comment(comment.id)
    assert not before.hidden
    assert before.hidden_at is None
    assert before.hidden_by is None

    response = api_client.post(
        f'/api/v1/tourney/match_comments/{comment.id}/flags/hidden',
        headers=[api_client_authz_header],
        json={'initiator_id': str(admin_user.id)},
    )
    assert response.status_code == 204

    after = tourney_match_comment_service.get_comment(comment.id)
    assert after.hidden
    assert after.hidden_at is not None
    assert after.hidden_by is not None
    assert after.hidden_by.id == admin_user.id
def test_unhide_comment(
    api_client, api_client_authz_header, admin_user, comment
):
    """DELETEing the 'hidden' flag reveals a previously hidden comment."""
    tourney_match_comment_service.hide_comment(comment.id, admin_user)
    hidden = tourney_match_comment_service.get_comment(comment.id)
    assert hidden.hidden
    assert hidden.hidden_at is not None
    assert hidden.hidden_by is not None

    response = api_client.delete(
        f'/api/v1/tourney/match_comments/{comment.id}/flags/hidden',
        headers=[api_client_authz_header],
        json={'initiator_id': str(admin_user.id)},
    )
    assert response.status_code == 204

    revealed = tourney_match_comment_service.get_comment(comment.id)
    assert not revealed.hidden
    assert revealed.hidden_at is None
    assert revealed.hidden_by is None
# helpers
@pytest.fixture(scope='module')
def match(api_app):
    """A tourney match shared by the tests in this module.

    Fix: the original passed `scope='module'` as a *parameter default* of the
    fixture function, where pytest silently ignores it; the scope belongs in
    the `@pytest.fixture` decorator.
    """
    return tourney_match_service.create_match()
@pytest.fixture()
def comment(api_app, match, user):
    """A fresh comment posted by `user` on `match`."""
    text = 'ยกVรกmonos!'
    return tourney_match_comment_service.create_comment(match.id, user, text)
|
10,126 | d26f1a7e426c5a280e66638bc239ce4b69f7b71b | import sys
import numpy as np
import random
import os
import math
def usage():
    """Print the command-line usage message for the packet generator."""
    for line in (
        "Usage \n",
        "python3 packet_generator.py <num_packets> <payload_length> <radix> <stages> <output_file_name> \n",
        "output_file_name is optional will default to test.mem in the current working directory",
    ):
        print(line)
def packet_gen(num_packets,payload_length,radix,stages,output_file_name="test.mem"):
num_packets=int(num_packets)
payload_length=int(payload_length)
header_size = math.log(radix**stages,2)
format_str = str(payload_length)
formater_payload = "{:0"+str(payload_length)+"d}"
formater_header = "{:0"+str(int(header_size))+"d}"
fout = open(output_file_name, 'w+')
# print(formater)
print(" ")
for y in range(0,num_packets):
for x in range(0, payload_length):
if(x == 0):
payload = random.randint(0,1)
else:
payload = str(payload) + str(random.randint(0,1))
payload = formater_payload.format(int(payload))
print("payload data bits : "+ payload)
print("Header size in bits : "+ str(header_size))
for x in range(0, int(header_size)):
if(x == 0):
header = random.randint(0,1)
else:
header = str(header) + str(random.randint(0,1))
total_packet = formater_header.format(int(header)) + formater_payload.format(int(payload))
fout.write(str(total_packet)+'\r')
print("Header data bits : "+ str(header))
print("Overall Packet is : "+ str(total_packet))
print("-------------------------------------------------------")
fout.close()
#Main Function
def main():
    """Parse command-line arguments and run the packet generator.

    Usage: packet_generator.py <num_packets> <payload_length> <radix> <stages> [output_file]

    Fix: the original duplicated the parsing of the four numeric arguments in
    both branches; they are parsed once here.
    """
    if len(sys.argv) < 5:
        usage()
        quit()
    # the four numeric arguments are common to both call forms
    num_packets, payload_length, radix, stages = (int(a) for a in sys.argv[1:5])
    if len(sys.argv) == 6:
        # explicit output file name supplied
        packet_gen(num_packets, payload_length, radix, stages, sys.argv[5])
    else:
        packet_gen(num_packets, payload_length, radix, stages)
if __name__ == "__main__":
main() |
10,127 | 2cf767ba9dea293fb5df7725d9f89ab17e0d6a8a | ## https://leetcode.com/problems/max-increase-to-keep-city-skyline/description/
class Solution:
    """LeetCode 807 — Max Increase to Keep City Skyline."""

    def findMaxInCol(self, grid, m, n):
        """Return a list with the maximum value of each of the n columns of grid.

        `m` is the number of rows; kept for interface compatibility.
        """
        return [max(grid[i][j] for i in range(m)) for j in range(n)]

    def maxIncreaseKeepingSkyline(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int

        Each building can grow to min(row max, column max) without altering
        either skyline; sum those headrooms. (The original's abs() was
        unnecessary: the cap is never below the current height.)
        """
        m = len(grid)
        n = len(grid[0])
        col_max = self.findMaxInCol(grid, m, n)
        total = 0
        for row in grid:
            row_max = max(row)
            for j, height in enumerate(row):
                total += min(row_max, col_max[j]) - height
        return total
|
10,128 | eea67d437a504d06bd909c4b52a8d59d770620a1 | #!/bin/python
import platform
os_info = platform.platform()
print os_info
if os_info.find('Linux') >= 0 or os_info.find('Darwin') >= 0: # Linux or Mac build
env = Environment()
env.Append(CPPFLAGS = '-Wall -O3')
env.Append(CPPPATH = ['src',])
elif os_info.find('Windows') >= 0:
env = Environment(MSVC_USE_SCRIPT = 'C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\bin\\x86_amd64\\vcvarsx86_amd64.bat')
env.Append(CPPFLAGS = ['/c','/nologo','/W3','/WX-','/O2'])
else:
print 'unknown platform, exiting'
exit(1)
sources = [
'src/main.cc',
'src/MyDecoder.cc'
]
defaultBuild = env.Program(target = 'mipsdecode', source = sources)
Default(defaultBuild)
|
10,129 | e8842838829bb9d68f660c0635d1dbac14a3f1c4 | import csv
from selenium.webdriver.support.ui import Select
def selectdepaturereturn(self, d="none", r="none", prom_cod="none"):
find_element_text = "Departure_option_xpath"
session_name = "Booking"
Departure_date_selected = Select(self.get_path(session_name, find_element_text))
Departure_date = self.get_path(session_name, find_element_text)
# Departure_date_options = Departure_date.find_elements_by_tag_name("option")
Departure_date_options_len = len(Departure_date.find_elements_by_tag_name("option"))
find_element_text = "Return_option_xpath"
Return_date_selected = Select(self.get_path(session_name, find_element_text))
Return_date = self.get_path(session_name, find_element_text)
# Return_date_options = Return_date.find_elements_by_tag_name("option")
find_element_text = "Promotional_code_xpath"
input_prom_code = self.get_path(session_name, find_element_text)
input_prom_code.clear()
if d <> "none" and r <> "none":
if prom_cod <> "none":
Departure_date_selected.select_by_index(d)
Return_date_selected.select_by_index(r)
input_prom_code.send_keys(prom_cod)
selected_d = 'This is your selected departure: '
selected_d_t = Departure_date_selected.first_selected_option.text
print selected_d, d, selected_d_t
selected_r = 'This is your selected return: '
selected_r_t = Return_date_selected.first_selected_option.text
print selected_r, r, selected_r_t
prom_r = 'This is your entered promotional code: '
prom_cod
print prom_r, prom_cod
self.get_search_result()
return (Departure_date_options_len)
with open("test_result_air_book.csv", "w") as fillin:
fieldnames = ['Selected_Depature', 'Selected_Return', "Promotional"]
writer = csv.DictWriter(fillin, fieldnames=fieldnames)
writer.writeheader()
writer.writerow(
{"Selected_Depature": selected_d_t, "Selected_Return": selected_r_t, "Promotional": prom_cod})
else:
Departure_date_selected.select_by_index(d)
Return_date_selected.select_by_index(r)
selected_d = 'This is your selected Departure: '
selected_d_t = Departure_date_selected.first_selected_option.text
print selected_d, d, selected_d_t
selected_r = 'This is your selected Return: '
selected_r_t = Return_date_selected.first_selected_option.text
print selected_r, r, selected_r_t
self.get_search_result()
# print ("invalid selection you provided")
# self.driver.quit()
|
10,130 | 68d93144b338174773b709a3ccca8294593122cc | #โ๐๐๐๐๐๐๐๐โ#
import sys
import math
from math import ceil, floor
import itertools
from functools import lru_cache
from collections import deque
sys.setrecursionlimit(10000000)
input=lambda : sys.stdin.readline().rstrip()
'''''โ'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def is_prime(n):
    """Deterministic Miller-Rabin primality test (negatives tested via abs).

    Uses witness set [2, 7, 61] for |n| < 2**32, which is known to be
    deterministic in that range, and the first twelve primes otherwise.
    """
    n = abs(n)
    if n == 2:
        return True
    if n == 1 or n % 2 == 0:
        return False
    # factor n-1 as odd_part * 2**k
    odd_part = n - 1
    while odd_part % 2 == 0:
        odd_part //= 2
    if n < 4294967296:
        witnesses = [2, 7, 61]
    else:
        witnesses = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
    for w in witnesses:
        if w >= n:
            continue
        exp = odd_part
        val = pow(w, exp, n)
        # square repeatedly until exp reaches n-1 or val hits 1 / n-1
        while exp != n - 1 and val != 1 and val != n - 1:
            val = val * val % n
            exp *= 2
        if val != n - 1 and exp % 2 == 0:
            return False
    return True
# read an integer from stdin and print the smallest prime >= that integer
x=int(input())
while not is_prime(x):
    x+=1
print(x)
|
10,131 | 8f2ea021c34f154961db57b8d28ab14f6a7e3beb | from datetime import datetime, timedelta, timezone
import time_machine
from translator.tools import date_tools
# --------------------------------------------------------------------------------unit test for parse_datetime_from_unix function--------------------------------------------------------------------------------
def test_parse_datetime_from_unix_valid():
    """An integer Unix timestamp parses to the matching aware UTC datetime."""
    parsed = date_tools.parse_datetime_from_unix(1609398000)
    assert parsed == datetime(2020, 12, 31, 7, tzinfo=timezone.utc)
def test_parse_datetime_from_unix_decimal():
    """A fractional Unix timestamp parses to within one second of the expected instant."""
    time = 1615866698.393723
    expected = datetime(2021, 3, 16, 3, 51, 38, tzinfo=timezone.utc)
    actual = date_tools.parse_datetime_from_unix(time)
    # fix: without abs(), any `actual` EARLIER than expected produced a
    # negative timedelta, which always compares < 1s and wrongly passed
    assert abs(actual - expected) < timedelta(seconds=1)
def test_parse_datetime_from_unix_string():
    """A numeric-string Unix timestamp parses the same as the integer form."""
    parsed = date_tools.parse_datetime_from_unix('1609398000')
    assert parsed == datetime(2020, 12, 31, 7, tzinfo=timezone.utc)
def test_parse_datetime_from_unix_invalid_none():
    """None input yields None rather than raising (identity check, not ==)."""
    assert date_tools.parse_datetime_from_unix(None) is None
def test_parse_datetime_from_unix_invalid_dict():
    """A non-numeric mapping input yields None (identity check, not ==)."""
    assert date_tools.parse_datetime_from_unix({}) is None
# --------------------------------------------------------------------------------unit test for parse_datetime_from_iso_string function--------------------------------------------------------------------------------
def test_parse_datetime_from_iso_string_valid():
    """A Zulu ISO-8601 string parses to the matching aware UTC datetime."""
    parsed = date_tools.parse_datetime_from_iso_string("2020-12-31T07:00:00Z")
    assert parsed == datetime(2020, 12, 31, 7, tzinfo=timezone.utc)
def test_parse_datetime_from_iso_string_with_decimal():
    """Fractional seconds in the ISO string are preserved as microseconds."""
    parsed = date_tools.parse_datetime_from_iso_string("2020-12-31T07:00:00.123Z")
    assert parsed == datetime(2020, 12, 31, 7, 0, 0, 123000, tzinfo=timezone.utc)
def test_parse_datetime_from_iso_string_invalid_none():
    """None input yields None (identity check, not ==)."""
    assert date_tools.parse_datetime_from_iso_string(None) is None
def test_parse_datetime_from_iso_string_invalid_num():
    """A non-string numeric input yields None (identity check, not ==)."""
    assert date_tools.parse_datetime_from_iso_string(12) is None
# --------------------------------------------------------------------------------unit test for get_iso_string_from_datetime function--------------------------------------------------------------------------------
def test_get_iso_string_from_datetime_valid():
    """An aware UTC datetime serializes to a Zulu ISO-8601 string."""
    moment = datetime(2020, 12, 31, 7, tzinfo=timezone.utc)
    assert date_tools.get_iso_string_from_datetime(moment) == "2020-12-31T07:00:00Z"
def test_get_iso_string_from_datetime_invalid_none():
    """None input yields None (identity check, not ==)."""
    assert date_tools.get_iso_string_from_datetime(None) is None
def test_get_iso_string_from_datetime_invalid_string():
    """A string (not a datetime) input yields None (identity check, not ==)."""
    assert date_tools.get_iso_string_from_datetime("2020-12-31T07:00:00Z") is None
# --------------------------------------------------------------------------------Unit test for get_event_status function--------------------------------------------------------------------------------
def test_get_event_status_active():
    """An event that started in the past with no end time reads as 'active'."""
    with time_machine.travel(datetime(2022, 1, 1)):
        status = date_tools.get_event_status(datetime(2020, 1, 1), None)
    assert status == "active"
def test_get_event_status_planned():
    """An event starting a year in the future reads as 'planned'."""
    with time_machine.travel(datetime(2020, 1, 1)):
        status = date_tools.get_event_status(datetime(2021, 1, 1), None)
    assert status == "planned"
def test_get_event_status_pending():
    """An event starting 13 days ahead reads as 'pending'."""
    with time_machine.travel(datetime(2021, 1, 1)):
        status = date_tools.get_event_status(datetime(2021, 1, 14), None)
    assert status == "pending"
def test_get_event_status_completed():
    """An event whose end time has passed reads as 'completed'."""
    with time_machine.travel(datetime(2022, 1, 1)):
        status = date_tools.get_event_status(datetime(2020, 1, 1), datetime(2021, 1, 1))
    assert status == "completed"
def test_get_event_status_none():
    """A missing start time yields no status (identity check, not ==)."""
    assert date_tools.get_event_status(None, None) is None
def test_get_event_status_string():
    """An unparsed string start time yields no status (identity check, not ==)."""
    assert date_tools.get_event_status("2020-12-31T07:00:00.123Z", None) is None
|
10,132 | 3a8149808aadc48420f9050a93a167347b073a80 | def main():
N, K = (int(i) for i in input().split())
if (N+1)//2 >= K:
print("YES")
else:
print("NO")
if __name__ == '__main__':
main()
|
10,133 | ae1dc185a11658df7838a799c1318b7167c5051b | #Short and handy script to login and after "x" seconds logout from Stackoverflow
import requests
import config
import re
import datetime
import time
### DEFINING VARIABLES ###
headers = {
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
}
params = {
"ssrc": "head",
"returnurl": "https://stackoverflow.com/",
}
url = 'https://stackoverflow.com/users/login?ssrc=head&returnurl=https://stackoverflow.com/'
cre = config.cre()
usr = cre['USR']
pwd = cre['PWD']
### DEFINING functions ###
def find_fkey():
    """Fetch the login page and pull the CSRF 'fkey' token out of its HTML."""
    print('Searching for fkey...')
    login_page = requests.get(url, params=params, headers=headers)
    token_match = re.search(r'"fkey":"([^"]+)"', login_page.text)
    return token_match.group(1)
def get_payload():
    """Build the login form payload, including a freshly scraped fkey token."""
    return {
        'openid_identifier': '',
        'password': pwd,
        'fkey': find_fkey(),
        'email': usr,
        'oauth_server': '',
        'oauth_version': '',
        'openid_username': '',
        'ssrc': 'head',
    }
def get_profile_url(session):
    """Scrape the logged-in homepage for the current user's profile URL."""
    response = session.get("https://stackoverflow.com/")
    html = response.text
    profile_url = "https://stackoverflow.com" + re.search(r'<a href="(.+)" class="my-profile', html).group(1)
    # fix: the original ended with a bare '\033m', which is not the ANSI
    # reset sequence and left the terminal colour stuck on green
    print('\033[92m Logged into profile: {} \033[0m'.format(profile_url))
    return profile_url
def surfing(session, profile_url):
    """Visit a fixed list of pages with the logged-in session to simulate activity."""
    print('\033[92m Begin surfing...\033[0m')
    # the user id is the 5th path segment of the profile URL
    user_id = profile_url.split('/')[4]
    pages = [
        'https://stackoverflow.com/jobs/companies',
        'https://stackoverflow.com/questions',
        'https://stackoverflow.com/questions/927358/how-do-i-undo-the-most-recent-local-commits-in-git',
        f'https://stackoverflow.com/users/preferences/{user_id}',
        'https://stackoverflow.com/questions',
        'https://stackoverflow.com/jobs/companies',
    ]
    for page in pages:
        stat = session.get(page)
        print(f'Accessing {page}....Status: {stat.status_code}')
        time.sleep(10)  # pace the requests
    # fix: the original ended with a bare '\033' escape (no reset code),
    # corrupting subsequent terminal output
    print('\033[92m Surfing successfully executed! \033[0m')
def login():
    """Log into Stack Overflow and, on success, browse a few pages with the session."""
    session = requests.session()
    print('Start login ...')
    payload = get_payload()
    response = session.post(url, data=payload, headers=headers, params=params)
    time.sleep(7)  # give the site a moment to settle before inspecting the result
    # a non-empty redirect history means the credentials were accepted
    if response.history:
        print("\033[92m Successfully logged in: {} with fkey: {}\033[0m".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
                                                                                payload['fkey']))
        profile_url = get_profile_url(session)
        session.get(profile_url)
        surfing(session, profile_url)
    else:
        print('\033[31m Something went wrong, please check again maybe USR or PWD? \033[0m')
if __name__ == '__main__':
    # retry the login up to 9 times; z is forced to 9 on the first success
    # so the while loop exits
    z = 0
    while z < 9:
        try:
            login()
            z = 9  # success: leave the retry loop
            time.sleep(5)
        except Exception as e:
            z += 1  # failed attempt: count it and retry after a short pause
            print(e)
            time.sleep(5)
|
10,134 | 0d8c6dec7b375a385c4df501e05eab2e5f685288 |
# coding: utf-8
# # Awele TME 1
# ##DE BEZENAC DE TOLDI
#
# In[9]:
# - - - - - - - - - - -
# IAMSI - 2016
# joueur d'Awele
# - - - - -
# REM: ce programme a ete ecrit en Python 3.4
#
# En salle machine : utiliser la commande "python3"
# - - - - - - - - - - -
# - - - - - - - - - - - - - - - INFORMATIONS BINOME
# GROUPE DE TD : 1
# NOM, PRENOM : DE BEZENAC EMMANUEL
# NOM, PRENOM : DE TOLDI MELCHIOR
# - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - TYPES UTILISES
# POSITION : dictionnaire non pleine qui contient differentes informations sur
# une position d'Awele, associees au nom de leur champ.
# COUP : valeur entiere comprise entre 1 et le nombre de colonnes du tablier
# - - - - - - - - - - - - - - - INITIALISATION
import numpy as np
def initialise(n):
    """int -> POSITION  (requires n > 0)

    Build the starting Awele position for a board with n columns:
    4 seeds in each of the 2n pits, SUD to move, no captures yet.
    """
    return {
        'tablier': [4] * (2 * n),            # 4 seeds in every pit
        'taille': n,                         # number of columns per side
        'trait': 'SUD',                      # side to move: 'SUD' or 'NORD'
        'graines': {'SUD': 0, 'NORD': 0},    # seeds captured by each side
    }
# - - - - - - - - - - - - - - - AFFICHAGE (TEXTE)
def affichePosition(position):
    """Print a text rendering of `position`: column numbers, both rows of the
    board, captured-seed counts, and the side to move."""
    print('* * * * * * * * * * * * * * * * * * * *')
    n = position['taille']
    col_header = 'col:'
    for col in range(n):
        col_header += ' ' + str(col + 1) + ' \t'
    print(col_header)
    print('\t\tNORD (prises: ' + str(position['graines']['NORD']) + ')')
    print('< - - - - - - - - - - - - - - -')
    # NORD occupies indices n..2n-1, displayed right-to-left
    nord_row = ''
    for idx in range(2 * n - 1, n - 1, -1):
        nord_row += '\t[' + str(position['tablier'][idx]) + ']'
    print(nord_row)
    # SUD occupies indices 0..n-1, displayed left-to-right
    sud_row = ''
    for idx in range(n):
        sud_row += '\t[' + str(position['tablier'][idx]) + ']'
    print(sud_row)
    print('- - - - - - - - - - - - - - - >')
    print('\t\tSUD (prises: ' + str(position['graines']['SUD']) + ')')
    print('-> camp au trait: ' + position['trait'])
# - - - - - - - - - - - - - - - CLONAGE
import copy
def clonePosition(position):
    """POSITION -> POSITION

    Return an independent copy of the position: mutating the clone's board
    or capture counts leaves the original untouched.
    """
    return {
        'tablier': copy.deepcopy(position['tablier']),
        'taille': position['taille'],
        'trait': position['trait'],
        'graines': copy.deepcopy(position['graines']),
    }
# - - - - - - - - - - - - - - - JOUE UN COUP
def joueCoup(position,coup):
    """POSITION * COUP -> POSITION

    Precondition: `coup` is playable. Returns the position obtained once the
    move has been played (sowing plus any captures); the caller's position
    is not modified.
    """
    nouvelle_pos = clonePosition(position) # work on a copy so the original is untouched
    n = nouvelle_pos['taille']
    trait = nouvelle_pos['trait']
    # convert the 1-based column number into a board index for the side to move
    if trait == 'SUD':
        indice_depart = coup-1
    else:
        indice_depart = 2*n-coup
    # pick up every seed from the starting pit
    nbGraines = nouvelle_pos['tablier'][indice_depart]
    nouvelle_pos['tablier'][indice_depart] = 0
    # sow the seeds one by one into the following pits
    indice_courant = indice_depart
    while nbGraines > 0:
        indice_courant = (indice_courant + 1) % (2*n)
        if (indice_courant != indice_depart): # the starting pit is never resown
            nouvelle_pos['tablier'][indice_courant] += 1 # drop one seed
            nbGraines -= 1
    # did the last seed land in the opponent's row?
    if (trait == 'NORD'):
        estChezEnnemi = (indice_courant < n)
    else:
        estChezEnnemi = (indice_courant >= n)
    # capture backwards while enemy pits hold exactly 2 or 3 seeds
    while estChezEnnemi and (nouvelle_pos['tablier'][indice_courant] in range(2,4)):
        nouvelle_pos['graines'][trait] += nouvelle_pos['tablier'][indice_courant]
        nouvelle_pos['tablier'][indice_courant] = 0
        indice_courant = (indice_courant - 1) % (2*n)
        if (trait == 'NORD'):
            estChezEnnemi = (indice_courant < n)
        else:
            estChezEnnemi = (indice_courant >= n)
    # hand the turn to the other side
    if trait == 'SUD':
        nouvelle_pos['trait'] = 'NORD'
    else:
        nouvelle_pos['trait'] = 'SUD'
    return nouvelle_pos
# ## EXERCICE 1
# In[16]:
import random
def coupJouable(position,nombre):
    """ POSITION * NOMBRE(COUP) -> BOOLEAN
    Tell whether `nombre` is a playable move from `position`: it must
    designate a pit on the side to move, and that pit must hold at
    least one seed.

    FIX: the original evaluated the board index before (and regardless
    of) the range check, so a move number beyond the board could raise
    IndexError instead of returning False; the range is now validated
    first.  Also removed a commented-out Python 2 print.
    """
    n = position['taille']
    if not (1 <= nombre <= n):
        return False
    # SUD's pits are numbered left-to-right (index nombre-1); NORD's
    # are numbered right-to-left (index 2n-nombre).
    pleine = False
    if position['trait'] == "SUD":
        pleine = position['tablier'][nombre - 1] > 0
    if position['trait'] == "NORD":
        pleine = position['tablier'][2 * n - nombre] > 0
    return pleine
def coupAutorise(position,coup):
    """ POSITION * COUP -> BOOLEAN || POSITION
    Return False when the move is not legal, otherwise the position
    reached after playing it.  A legal move must be playable AND must
    leave at least one seed on the opponent's side ("no starving").
    """
    if not coupJouable(position, coup):
        return False
    apres = joueCoup(position, coup)
    n = apres['taille']
    if position['trait'] == 'NORD':
        # NORD just moved: SUD's side (indices 0..n-1) must not be empty.
        if sum(apres['tablier'][0:n]):
            return apres
    if position['trait'] == 'SUD':
        # SUD just moved: NORD's side (indices n..2n-1) must not be empty.
        if sum(apres['tablier'][n:2 * n]):
            return apres
    return False
def positionTerminale(position):
    """ POSITION -> BOOLEAN
    True when the game is over: either one side has captured 25 seeds
    or more, or the side to move has no legal move left.
    """
    graines = position['graines']
    if graines['NORD'] >= 25 or graines['SUD'] >= 25:
        return True
    # Terminal exactly when no move is authorised from this position
    # (coupAutorise returns a truthy position dict or False).
    return not any(coupAutorise(position, c)
                   for c in range(1, position['taille'] + 1))
def moteurHumains(taille=3):
    """ TAILLE -> nothing
    Interactive game loop for two human players, alternating turns
    until the position is terminal, then announcing the result.
    NOTE(review): Python 2 code (`print` statements, `input()`
    evaluates the typed text); relies on initialise/affichePosition
    defined earlier in the file.
    """
    pos=initialise(taille)
    while(not(positionTerminale(pos))): # loop as long as a legal move exists
        affichePosition(pos)
        # Prompt until the entered number is within 1..taille.
        coup = pos['taille']+1
        while(coup>pos['taille']):
            coup=input('Rentre ton coup, c\'est ton tour!\n')
        if(coupAutorise(pos,coup)):
            pos=joueCoup(pos,coup)
        else:
            print('essaye encore...\n') # the player entered an illegal move
    print("*** FINI ***")
    affichePosition(pos)
    # Decide the winner by captured seeds.
    if(pos['graines']['NORD'] == pos['graines']['SUD']):
        print "Match Nul"
    else:
        gagnant = "Nord" if (pos['graines']['NORD'] > pos['graines']['SUD']) else "Sud"
        print gagnant + " remporte la manche"
def choixAleatoire(position):
    """ POSITION -> COUP
    Return a uniformly random legal move for the CPU, or 0 when no
    move is playable from the given position.

    FIX: the original stored the candidate moves in a local named
    `coupJouable`, shadowing the module-level function of that name;
    it also shuffled the whole list just to take its first element.
    We rename the local and use random.choice (same uniform
    distribution).
    """
    if positionTerminale(position):
        return 0
    coups_possibles = [c for c in range(1, position['taille'] + 1)
                       if coupAutorise(position, c)]
    return random.choice(coups_possibles)
def moteurAleatoire(campCPU = "NORD"):
    """ CAMPCPU -> nothing
    Interactive game loop: a human plays against a CPU that picks a
    random legal move each turn (choixAleatoire).
    NOTE(review): Python 2 code (`print` statements, `input()`
    evaluates the typed text).
    """
    taille = input("Quelle taille pour cette partie ?")
    pos = initialise(taille)
    affichePosition(pos)
    print '*** on commence ***'
    while(not(positionTerminale(pos))):
        if(campCPU == pos['trait']):
            # CPU's turn: random legal move.
            coup = choixAleatoire(pos)
            print 'CPU joue la case '+ str(coup)
            pos = joueCoup(pos,coup)
            affichePosition(pos)
        else:
            coup = 0 # start with an illegal value so the loop runs at least once
            while(coup<1 or coup>pos['taille']):
                coup=input('Rentre ton coup, c\'est ton tour!\n')
            if(coupAutorise(pos,coup)):
                pos=joueCoup(pos,coup)
            else:
                print('essaye encore...\n')
            affichePosition(pos)
    print("*** FINI ***")
    if(pos['graines']['NORD'] == pos['graines']['SUD']):
        print "Match Nul"
    else:
        gagnant = "NORD" if (pos['graines']['NORD'] > pos['graines']['SUD']) else "SUD"
        print gagnant + " remporte la manche"
        if campCPU == gagnant:
            print 'perdre contre cette IA c\' est un peu la honte...'
# ## EXERCICE 2 miniMax
# In[21]:
def nbCase(c,p):
    """ CAMP, POSITION -> INTEGER
    Count the pits holding exactly one or two seeds on the given
    side of the board (NORD: indices n..2n-1, SUD: indices 0..n-1).
    Any other camp name yields 0.
    """
    n = p['taille']
    if c == 'NORD':
        cases = p['tablier'][n:2 * n]
    elif c == 'SUD':
        cases = p['tablier'][0:n]
    else:
        cases = []
    return sum(1 for g in cases if g in (1, 2))
def evaluation(position):
    """ POSITION -> EVALUATION
    Score a position from NORD's point of view (NORD is the maximizing
    player in evalueMiniMax): positive values favour NORD, negative
    values favour SUD.
    """
    if positionTerminale(position):
        # BUGFIX: the original returned -1000 when NORD was ahead at a
        # terminal position, i.e. a won game scored as a loss for the
        # maximizer.  That contradicted both the non-terminal heuristic
        # below (positive = good for NORD) and evaluationCpu, which
        # scores "SUD ahead" as very negative.  We mirror evaluationCpu:
        # SUD strictly ahead -> -1000, otherwise +1000.
        if position['graines']['SUD'] > position['graines']['NORD']:
            return -1000
        return 1000
    # Heuristic: 2 points per captured seed plus 1 point per pit
    # holding 1-2 seeds on each side (weights kept from the original).
    return (2 * position['graines']['NORD'] + nbCase('NORD', position)
            - (2 * position['graines']['SUD'] + nbCase('SUD', position)))
def evalueMiniMax(position,prof,coup=1):
    """ POSITION, PROFONDEUR, COUP -> {MEILLEUR COUP, VALEUR DE CE COUP}
    Minimax search: return the best move from `position` explored to
    depth `prof`, together with its value.  NORD maximizes, SUD
    minimizes; `coup` is the move that led here (echoed at the leaves).

    FIX: the original cloned the position into `p` and then immediately
    overwrote the clone with joueCoup(position, a) -- joueCoup already
    works on its own copy, so the clonePosition call was dead code and
    has been removed.  Search behaviour is unchanged.
    """
    if prof == 0 or positionTerminale(position):
        return {'coup': coup, 'valeur': evaluation(position)}
    maximise = position['trait'] == 'NORD'
    bestValue = -float('inf') if maximise else float('inf')
    bestCoup = 0
    for a in range(1, position['taille'] + 1):
        if coupAutorise(position, a):
            e = evalueMiniMax(joueCoup(position, a), prof - 1, a)
            # <= / >= preserve the original tie-breaking: a later move
            # with an equal value replaces the earlier one.
            if (maximise and bestValue <= e['valeur']) or \
               (not maximise and bestValue >= e['valeur']):
                bestValue = e['valeur']
                bestCoup = a
    return {'coup': bestCoup, 'valeur': bestValue}
def choixMinimax(position,prof):
    """ POSITION, PROFONDEUR -> COUP
    Return the move chosen by evalueMiniMax, or 0 when the given
    position is already terminal.
    """
    if positionTerminale(position):
        return 0
    return evalueMiniMax(position, prof)['coup']
def moteurMiniMax(campCPU="NORD",prof=3):
    """ CAMPCPU, PROFONDEUR -> nothing
    Interactive game loop: a human plays against a CPU that picks its
    moves with minimax (choixMinimax) at depth `prof`.
    NOTE(review): Python 2 code (`print` statements, `input()`
    evaluates the typed text).
    """
    taille = input("Quelle taille pour cette partie ?")
    pos = initialise(taille)
    affichePosition(pos)
    print '*** on commence ***'
    while(not(positionTerminale(pos))):
        if(campCPU == pos['trait']):
            # CPU's turn: minimax move.
            coup = choixMinimax(pos,prof)
            print 'CPU joue la case '+ str(coup)
            pos = joueCoup(pos,coup)
            affichePosition(pos)
        else:
            # Human's turn: prompt until the number is within range.
            coup = pos['taille']+1
            while(coup>pos['taille']):
                coup=input('Rentre ton coup, c\'est ton tour!\n')
            if(coupAutorise(pos,coup)):
                pos=joueCoup(pos,coup)
            else:
                print('essaye encore...\n')
            affichePosition(pos)
    print("*** FINI ***")
    if(pos['graines']['NORD'] == pos['graines']['SUD']):
        print "Match Nul"
    else:
        gagnant = "NORD" if (pos['graines']['NORD'] > pos['graines']['SUD']) else "SUD"
        print gagnant + " remporte la manche"
        if campCPU == gagnant:
            print 'perdre contre cette IA c\' est un peu la honte...'
# ## EXERCICE 3: ALPHA-BETA
# In[22]:
def evalueAlphaBeta(position,prof,i,alpha,beta):
    """ POSITION, PROFONDEUR, COUP, ALPHA, BETA -> {MEILLEUR COUP, VALEUR DE CE COUP}
    Alpha-beta pruned minimax: return the best move from `position`
    explored to depth `prof`, with its value.  NORD maximizes (raises
    alpha), SUD minimizes (lowers beta); the move loop stops as soon
    as alpha >= beta.  The incoming `i` parameter is kept only for
    interface compatibility -- it is reset before the search loop, as
    in the original.

    FIX: the original cloned the position into `p` and then immediately
    overwrote the clone with joueCoup(position, i) -- joueCoup already
    works on its own copy, so the dead clonePosition call was removed.
    """
    if prof==0 or positionTerminale(position): # leaf: static evaluation
        return {'coup':1,'valeur':evaluation(position)}
    bestCoup = 0
    nb_cases = position['taille']
    if position['trait'] == 'NORD':
        # Maximizing node: best value so far is alpha.
        i = 1
        while i <= nb_cases and alpha < beta:
            if coupAutorise(position, i):
                e = evalueAlphaBeta(joueCoup(position, i), prof - 1, i, alpha, beta)
                if alpha < e['valeur']:
                    bestCoup = i
                    alpha = e['valeur']
            i += 1
        return {'coup': bestCoup, 'valeur': alpha}
    else:
        # Minimizing node: best value so far is beta.
        i = 1
        while i <= nb_cases and alpha < beta:
            if coupAutorise(position, i):
                e = evalueAlphaBeta(joueCoup(position, i), prof - 1, i, alpha, beta)
                if beta > e['valeur']:
                    bestCoup = i
                    beta = e['valeur']
            i += 1
        return {'coup': bestCoup, 'valeur': beta}
def choixAlphaBeta(position,prof):
    """ POSITION, PROFONDEUR -> COUP
    Return the move chosen by evalueAlphaBeta, or 0 when the given
    position is already terminal.

    FIX: replaced np.inf with float('inf').  numpy is only imported
    much further down the file, so calling this function before that
    import executed raised a NameError; float('inf') is equivalent and
    matches evalueMiniMax's bounds.
    """
    if positionTerminale(position):
        return 0
    resultat = evalueAlphaBeta(position, prof, 1, -float('inf'), float('inf'))
    return resultat['coup']
def moteurAlphaBeta(campCPU="NORD",prof=3):
    """ CAMPCPU, PROFONDEUR -> nothing
    Interactive game loop: a human plays against a CPU that picks its
    moves with alpha-beta search (choixAlphaBeta) at depth `prof`.
    NOTE(review): Python 2 code (`print` statements, `input()`
    evaluates the typed text).
    """
    taille = input("Quelle taille pour cette partie ?")
    pos = initialise(taille)
    affichePosition(pos)
    print '*** on commence ***'
    while(not(positionTerminale(pos))):
        if(campCPU == pos['trait']):
            # CPU's turn: alpha-beta move.
            coup = choixAlphaBeta(pos,prof)
            print 'CPU joue la case '+ str(coup)
            pos = joueCoup(pos,coup)
            affichePosition(pos)
        else:
            # Human's turn: prompt until the number is within range.
            coup = pos['taille']+1
            while(coup>pos['taille']):
                coup=input('Rentre ton coup, c\'est ton tour!\n')
            if(coupAutorise(pos,coup)):
                pos=joueCoup(pos,coup)
            else:
                print('essaye encore...\n')
            affichePosition(pos)
    print("*** FINI ***")
    if(pos['graines']['NORD'] == pos['graines']['SUD']):
        print "Match Nul"
    else:
        gagnant = "NORD" if (pos['graines']['NORD'] > pos['graines']['SUD']) else "SUD"
        print gagnant + " remporte la manche"
        if campCPU == gagnant:
            print 'perdre contre cette IA c\' est un peu la honte...'
# #Approfondissement
#
# Afin de trouver les paramรจtres optimaux de la fonction d'รฉvaluation nous avons mis en place un algorithme gรฉnรฉtique, ici avec un minimax (trรจs long) mais facilement adaptable avec un algorithme alpha beta.
#
# On a dรฉcomposรฉ la fonction d'รฉvaluation en 6 paramรจtres A,B,C,D,E et F (cf evaluationCpu).
#
# Ensuite on procรจde aux รฉtapes habituelles d'un algorithme gรฉnรฉtique :
#
# * Crรฉation de la population de la gรฉnรฉration 0 `genererPopulation(nombre d'indidividus)`
# * Crรฉation des individus de cette population `genererIndividu`
# * On fait une boucle sur le nombre de gรฉnรฉration voulu (ex 50)
# * On fait s'affronter les individus par "poules" pour allรฉger les calculs, ex: sur une population de 50 on fait 5 poules de 10. Dans chaque poule tous les individus s'affrontent en utilisant leur propre fonction d'รฉvaluation : `tournoi`
# * On rรฉcupรจre les parents : les x premiers de chaque poules.
# * Les parents se "reproduisent", les autres sont รฉliminรฉs, il y a une petite probabilitรฉ qu'un des parents mue: `reproduction` et `mutation`
# * C'est la fonction `nouvelleGeneration` qui gรจre tout cela
# * Une fois qu'on a la gรฉnรฉration finale, on les fait s'affronter une derniรจre fois et on rรฉcupรจre le "Champion"
#
# On a remarquรฉ un pourcentage de rรฉussite allant jusqu'ร 70% (affrontement du champion contre 1000 individus utilisant minimax avec une fonction d'รฉvalutation allรฉatoire) avec certains paramรจtre, mais cela reste variable et la mรฉthode n'a pas รฉtรฉ trรจs concluante.
#
# Les fonction xxxxCpu sont les fonctions usuelles adaptรฉes ร une fonction d'รฉvalutation variable.
#
# Le meilleur champion trouvรฉ est :
# {'A': 0.5059132062875165, 'C': 0.6081366767125125, 'B': 0.596495474468575, 'E': 0.46028412950161013, 'D': 0.7411628325957265, 'F': 0.3750175831732554, 'generation': 0, 'nbVictoire': 46, 'camp': 'SUD'}
#
# In[28]:
def autreCamp(a):
    """Return the opposite camp: 'NORD' <-> 'SUD'.  Anything that is
    not exactly 'NORD' maps to 'NORD', as in the original."""
    return "SUD" if a == "NORD" else "NORD"
def evaluationCpu(position,cpu):
    """Weighted variant of `evaluation`: every term of the heuristic is
    scaled by one of the individual's genes A..F so that a genetic
    algorithm can tune the evaluation function."""
    graines = position['graines']
    if positionTerminale(position):
        # Terminal: SUD strictly ahead scores very negative, otherwise
        # very positive (NORD is the maximizing side).
        if graines["SUD"] > graines["NORD"]:
            return cpu["A"] * (-10000)
        return cpu["B"] * (10000)
    score_nord = cpu["C"] * 2 * graines["NORD"] + cpu["D"] * nbCase("NORD", position)
    score_sud = cpu["E"] * 2 * graines["SUD"] + cpu["F"] * nbCase("SUD", position)
    return score_nord - score_sud
def evalueMiniMaxCpu(position,prof,campCPU,cpu,coup=1):
    """Minimax identical to evalueMiniMax but scoring leaf positions
    with the individual's weighted evaluation function evaluationCpu.
    `campCPU` is threaded through the recursion for interface
    compatibility but is not otherwise used.

    FIX: removed the dead clonePosition call -- the clone was assigned
    to `p` and immediately overwritten by joueCoup(position, a), which
    already works on its own copy.
    """
    if prof == 0 or positionTerminale(position):
        return {'coup': coup, 'valeur': evaluationCpu(position, cpu)}
    maximise = position['trait'] == "NORD"
    bestValue = -float('inf') if maximise else float('inf')
    bestCoup = 0
    for a in range(1, position['taille'] + 1):
        if coupAutorise(position, a):
            e = evalueMiniMaxCpu(joueCoup(position, a), prof - 1, campCPU, cpu, a)
            # <= / >= preserve the original tie-breaking (later equal
            # moves replace earlier ones).
            if (maximise and bestValue <= e['valeur']) or \
               (not maximise and bestValue >= e['valeur']):
                bestValue = e['valeur']
                bestCoup = a
    return {'coup': bestCoup, 'valeur': bestValue}
def choixMinimaxCpu(position,prof,campCPU,cpu):
    """Move chosen by evalueMiniMaxCpu for this individual, or 0 when
    the position is already terminal."""
    if positionTerminale(position):
        return 0
    return evalueMiniMaxCpu(position, prof, campCPU, cpu)['coup']
def moteurCpuVsCpu(cpu1,cpu2,prof = 3,taille = 6):
    """Play one full game between two GA individuals, each using its own
    weighted minimax.  Camps are drawn at random; the game is aborted
    after 10 consecutive moves without any capture.  Returns 1 if cpu1
    wins, 2 if cpu2 wins (by captured-seed count, ties go to the side
    that is not `gagnant`, i.e. NORD on equality).
    NOTE(review): Python 2 code (`print i`).
    """
    coupSansPrise = 0
    # Randomly assign camps to the two individuals.
    al = random.random()
    if(al > .5):
        cpu1['camp'] = 'SUD'
        cpu2['camp'] = 'NORD'
    else:
        cpu2['camp'] = 'SUD'
        cpu1['camp'] = 'NORD'
    i=0
    pos = initialise(taille)
    while(not(positionTerminale(pos))):
        # Remember the capture counts to detect a stalemate-ish game.
        graineNord = pos['graines']['NORD']
        graineSud = pos['graines']['SUD']
        i+=1
        if i%500 == 0:
            print i
        if(cpu1['camp'] == pos['trait']):
            coup = choixMinimaxCpu(pos,prof,cpu1['camp'],cpu1)
            pos = joueCoup(pos,coup)
        else:
            coup = choixMinimaxCpu(pos,prof,cpu2['camp'],cpu2)
            pos = joueCoup(pos,coup)
        if(graineNord == pos['graines']['NORD'] and graineSud == pos['graines']['SUD']):
            coupSansPrise +=1
        else:
            coupSansPrise = 0
        if coupSansPrise>10:
            break
    gagnant = "NORD" if (pos['graines']['NORD'] > pos['graines']['SUD']) else "SUD"
    if(cpu2['camp']==gagnant):
        return 2
    else:
        return 1
import numpy as np
from operator import itemgetter
def genererIndividu():
    """Create one random GA individual: six genes A..F drawn uniformly
    in [0, 1) plus bookkeeping fields (camp, generation, victory count)."""
    individu = {'camp': "", 'generation': 0, 'nbVictoire': 0}
    for gene in ("A", "B", "C", "D", "E", "F"):
        individu[gene] = random.random()
    return individu
def genererPopulation(n):
    """Build the generation-0 population: a list of n random
    individuals.  NOTE(review): Python 2 `print` statement."""
    pop = []
    for i in range(n):
        pop.append(genererIndividu())
    print 'population crรฉe !'
    return pop
def tournoi(pop,nbSelec):
    """Round-robin tournament: every pair of individuals plays one game
    (moteurCpuVsCpu) and the winner's victory counter is incremented
    in place.  Return the nbSelec best individuals, sorted by number
    of victories (descending)."""
    for i in range(len(pop)):
        for j in range(i + 1, len(pop)):
            if moteurCpuVsCpu(pop[i], pop[j]) == 1:
                pop[i]['nbVictoire'] += 1
            else:
                pop[j]['nbVictoire'] += 1
    classement = sorted(pop, key=itemgetter('nbVictoire'), reverse=True)
    return classement[:nbSelec]
def reproduction(parent1,parent2):
    """Crossover: the child's genes A..F are the 50/50 arithmetic mean
    of its parents' genes; bookkeeping fields start from scratch."""
    alpha = .5
    enfant = {'camp': "", 'generation': 0, 'nbVictoire': 0}
    for gene in ['A', 'B', 'C', 'D', 'E', 'F']:
        enfant[gene] = parent1[gene] * alpha + parent2[gene] * (1 - alpha)
    return enfant
def mutation(individu):
    """Return a mutated copy of `individu`: one of the six genes A..F,
    picked uniformly at random, is replaced by a fresh random value.

    FIX: the original did `mutant = individu`, which merely aliases the
    input, so the parent was mutated in place -- and nouvelleGeneration
    appends both the parent and its "mutant", which therefore shared
    the same dict.  We now mutate a shallow copy and leave the input
    untouched.
    """
    letters = ['A', 'B', 'C', 'D', 'E', 'F']
    mutant = dict(individu)
    mutant[random.choice(letters)] = random.random()
    return mutant
def nouvelleGeneration(parents,n):
    """Build the next generation of size n: keep every parent (victory
    counter reset), occasionally (p = 0.05) also add a mutated copy of
    a parent, then fill the remainder with crossover children of two
    distinct randomly-drawn parents.
    NOTE(review): loops forever if fewer than two parents are given --
    same as the original."""
    generation = []
    for parent in parents:
        parent['nbVictoire'] = 0
        generation.append(parent)
        if random.random() > .95:
            generation.append(mutation(parent))
    # Fill up with children until the population reaches size n.
    while len(generation) < n:
        i1 = random.randrange(0, len(parents))
        i2 = random.randrange(0, len(parents))
        if i1 != i2:
            generation.append(reproduction(parents[i1], parents[i2]))
    return generation
# ### example de l'algorithme
# In[ ]:
pop = genererPopulation(10) # generation-0 population (tiny here for the example; use larger values and alpha-beta in practice)
for i in range(10): # one iteration per generation
    print "generation : " + str(i)
    pop1 = tournoi(pop[0:5],1)
    pop2 = tournoi(pop[5:10],1) # split into two pools to limit the number of games
    pop = nouvelleGeneration([pop1[0],pop2[0]],10) # breed the next generation from the pool winners
winner = tournoi(pop,1) # final tournament to pick the champion
winner = winner[0]
print winner
pop = genererPopulation(1000) # benchmark: the champion plays 1000 random individuals
score = 0
for i in range(len(pop)):
    if i%100 == 0:
        print str(i)+" matchs !"
        print str(score/((i+1.)/100))+"% de rรฉussite, pas mal la bรจte !"
    res = moteurCpuVsCpu(winner,pop[i])
    if res == 1:
        score+=1
print str(score/10)+"% de rรฉussite, pas mal la bรจte !"
|
10,135 | 0be99279635d8b5185fb29b2dd272f51d2d6c8f0 | import apps.configs.configuration as conf
from apps.configs.vars import Vars
from apps.models.tarea_programada import TareaProgramada,StatusTarea
from apps.models.receptor_estado import ReceptorDeEstado
from apps.utils.system_util import path_join
import json
from apps.models.exception import AppException
from apps.utils.logger_util import get_logger
import apps.utils.scheduler_util as scheduler_util
from typing import List,Dict
from datetime import date
from apps.services.mail_service import enviar_mail
# Module-wide scheduler instance shared by every helper below.
_sched = scheduler_util.crear_scheduler()
# Name of the JSON file (inside DIRECTORIO_FILES) that persists the task list.
_ARCHIVO_CONFIG="config_tareas.json"
logger = get_logger(__name__)
def _funcion_vacia():
print("f vacia")
def _funcion_wrapper(*args):
    """Job entry point: args[0] is the TareaProgramada, the rest are the
    external module's arguments.  Runs the external function, logs the
    resulting status and, if the matching receptor is active, renders
    and sends the notification mail."""
    tarea = list(args).pop(0)
    if not tarea.activa:
        return  # disabled tasks are silently skipped
    # NOTE(review): when the module has no "evaluar" function this falls
    # back to `lambda a,t: eval`, i.e. it returns the *builtin* eval for
    # every argument instead of evaluating `a` -- confirm this is intended.
    funcion_eval = tarea.modulo_externo.get_funcion("evaluar") if tarea.modulo_externo.contiene_funcion("evaluar") else lambda a,t: eval
    args = [tarea]+[funcion_eval(a,tarea) for a in tarea.modulo_externo.argumentos]
    logger.info(f"EJECUTANDO TAREA {tarea.id} ...")
    # The external callable returns (success flag, result values).
    success,results = tarea.modulo_externo.get_funcion()(*args)
    status_tarea = StatusTarea.ok if success else StatusTarea.failed
    logger.info(f"TAREA {tarea.id} TERMINADA CON STATUS: {status_tarea}")
    receptor = tarea.obtener_receptor(status_tarea)
    if receptor.activo:
        logger.info(f"ARMANDO MAIL ...")
        # Clone the receptor so template rendering does not mutate the task's copy.
        receptor = ReceptorDeEstado.from_dict(receptor.to_dict())
        receptor_parameters = [date.today(),status_tarea.value]+list(results)
        funcion_eval = tarea.modulo_externo.get_funcion("evaluar") if tarea.modulo_externo.contiene_funcion("evaluar") else lambda a,t: eval
        receptor.actualizar_template(funcion_eval,*receptor_parameters)
        logger.info(f"ENVIANDO MAIL ...")
        enviar_mail(receptor)
        logger.info(f"MAIL ENVIADO.")
def _crear_cron_tarea(tarea:TareaProgramada):
    '''
    Create the cron job backing the scheduled task: register a
    placeholder job under the task's id and cron expression, then
    patch in the real wrapper and its arguments (the wrapper receives
    the task itself as first argument).

    FIX: removed an unused local (`funcion_eval`) that was computed
    here but never read; the evaluation function is resolved inside
    _funcion_wrapper at execution time.
    '''
    job = scheduler_util.agregar_job(_sched, _funcion_vacia, tarea.cron,tarea.id)
    job.args = [tarea]+tarea.modulo_externo.argumentos
    job.func = _funcion_wrapper
def _eliminar_cron_tarea(id_tarea:str):
    '''
    Remove the cron job associated with the scheduled task id from the
    module-wide scheduler.
    '''
    scheduler_util.remover_job(_sched,id_tarea)
def ejecutar_tarea(id_tarea:str):
    '''
    Trigger the task's job immediately (by id) on the module-wide
    scheduler, regardless of its cron schedule.
    '''
    scheduler_util.ejecutar_job(_sched,id_tarea)
def get_tarea(id_tarea:str):
    '''
    Return the scheduled task whose id matches id_tarea.
    Raises IndexError when no such task exists (same as the original
    filter-based lookup).
    '''
    coincidencias = [t for t in get_tareas() if t.id == id_tarea]
    return coincidencias[0]
def get_tareas():
    '''
    Load and return the list of scheduled tasks from the JSON config
    file stored under the configured files directory.
    '''
    ruta_config = path_join(conf.get(Vars.DIRECTORIO_FILES), _ARCHIVO_CONFIG)
    with open(ruta_config, "r") as archivo:
        datos = json.load(archivo)
    return [TareaProgramada.from_dict(d) for d in datos]
def actualizar_atributos_tarea(id_tarea:str,atributos_nuevos:Dict):
    '''
    Update the given attributes on the existing task with id id_tarea:
    the task is removed and re-added with the merged attribute dict
    (which is also returned).
    '''
    actual = get_tarea(id_tarea).to_dict()
    actual.update(atributos_nuevos)
    # Re-register so both the persisted config and the cron job reflect
    # the new attributes.
    eliminar_tarea(id_tarea)
    agregar_tarea(TareaProgramada.from_dict(actual))
    return actual
def save_tareas_programadas(tareas_programadas:List[TareaProgramada]):
    '''
    Persist the given list of scheduled tasks to the JSON config file.
    (The original docstring was copy-pasted from get_tareas; this
    function writes the list, it does not return it.)
    '''
    ruta_config = path_join(conf.get(Vars.DIRECTORIO_FILES),_ARCHIVO_CONFIG)
    with open(ruta_config,"w") as f:
        tareas_programadas_dict=[t.to_dict() for t in tareas_programadas]
        json.dump(tareas_programadas_dict,f)
def agregar_tarea(tarea:TareaProgramada):
    '''
    Register a new scheduled task: create its cron job and persist the
    updated configuration.
    Raises AppException("TAREA_EXISTENTE") when a task with the same id
    already exists.

    FIX: removed an unused local (`ruta_config`) -- the config path is
    only needed inside get_tareas/save_tareas_programadas.
    '''
    tareas_programadas = get_tareas()
    if(any(tarea.id==t.id for t in tareas_programadas)):
        raise AppException("TAREA_EXISTENTE",f"La tarea con el id {tarea.id} ya existe")
    _crear_cron_tarea(tarea)
    tareas_programadas.append(tarea)
    save_tareas_programadas(tareas_programadas)
def eliminar_tarea(id_tarea:str):
    '''
    Delete the scheduled task matching id_tarea: remove its cron job
    and persist the list without it.
    '''
    restantes = [t for t in get_tareas() if t.id != id_tarea]
    _eliminar_cron_tarea(id_tarea)
    save_tareas_programadas(restantes)
def iniciar_proceso_automatico():
    '''
    Start the scheduler with every configured task registered as a
    cron job.
    '''
    tareas = get_tareas()
    logger.info('Iniciando proceso automatico ...')
    for tarea in tareas:
        _crear_cron_tarea(tarea)
    scheduler_util.inciar_scheduler(_sched)
|
10,136 | 9eecfc950df298c1de1c1c596f981051b2eb4f89 |
import random
# ----- Prompt/message constants (user-facing Spanish text) ----- #
# NOTE(review): MENSAJE_SALUDAR is defined but never printed anywhere
# in this script -- confirm whether the greeting was meant to be shown.
MENSAJE_SALUDAR = '''
Bienvenido
a este programa,
!!!jueguemos!!'''
MENSAJE_SEGUNDO_NIVEL = 'Felicidades pasaste el primer nivel ahora ve por el รบltimo!!'
MENSAJE_CALIENTE = 'Estas caliente'
MENSAJE_FRIO = 'Estas Frio'
PREGUNTAR_NUMERO = '''
En este juego debes asetar un nรบmero entero
que va desde el 1-10, la idea es que
lo puedes intentar ante de que se te
acaben las vidas...no existe vida 0
Muchos รฉxitos, ingresa tu numero
'''
PREGUNTA_DIFICULTAD = '''
1- Fรกcil
2- Moderado
3- Difรญcil
'''
PREGUNTAR_FALLIDA = 'Aaaaah! Fallaste โปโบโปโฅโฆโฃโ โข ingresa otro nรบmero :'
MENSAJE_GANASTE = 'Felicidades ganaste!!'
MENSAJE_PERDISTE = 'Perdiste D: "vuelve" a intentarlo!!'
# --- Main script: two-level number guessing game --- #
numeroOculto = random.randint(1,10)     # secret number for level 1
numeroOcultoDos = random.randint (1,10) # secret number for level 2
vidas = None
# Ask for the difficulty until a valid option (1/2/3) is entered;
# the difficulty only sets the number of lives.
dificultad = int (input(PREGUNTA_DIFICULTAD))
while (dificultad !=1 and dificultad != 2 and dificultad !=3 ):
    print ('ingresa una opciรณn vรกlida')
    dificultad = int (input(PREGUNTA_DIFICULTAD))
if (dificultad == 1):
    print ('Modo fรกcil activado')
    vidas = 10
elif (dificultad == 2):
    print ('Modo moderado activado')
    vidas = 5
else :
    print ('Modo Difรญcil activado, sssss mucho cuidado')
    vidas = 2
# Level 1: keep guessing while wrong and more than one life remains.
# NOTE(review): "caliente" is printed when the guess is ABOVE the
# secret number (a direction hint, not a distance hint) -- confirm.
numeroIngresado = int (input(PREGUNTAR_NUMERO))
while (numeroIngresado != numeroOculto and vidas>1):
    if (numeroIngresado> numeroOculto):
        print (MENSAJE_CALIENTE)
    else:
        print(MENSAJE_FRIO)
    vidas -=1
    print (f'te quedan {vidas} vidas')
    numeroIngresado =int(input(PREGUNTAR_FALLIDA))
# Level 2 is only reached when level 1's number was actually guessed;
# the remaining lives carry over.
if (vidas >= 0 and numeroIngresado == numeroOculto):
    print (MENSAJE_SEGUNDO_NIVEL)
    numeroIngresado = int (input(PREGUNTAR_NUMERO))
    while (numeroIngresado != numeroOcultoDos and vidas>1):
        if (numeroIngresado > numeroOcultoDos):
            print (MENSAJE_CALIENTE)
        else:
            print(MENSAJE_FRIO)
        vidas -=1
        print (f'te quedan {vidas} vidas')
        numeroIngresado =int(input(PREGUNTAR_FALLIDA))
if (vidas >= 0 and numeroIngresado == numeroOcultoDos ):
print (MENSAJE_GANASTE)
else:
print (MENSAJE_PERDISTE,
'El numero uno era el ',
numeroOculto,
'El nรบmero dos era el',
numeroOcultoDos) |
10,137 | f1d36444e6a1c54ff572586fa59566a52384819e | #!/usr/bin/python
#import os
import json
import sys
#import time
from datetime import datetime as dt
from datetime import date, timedelta
import pdb
import smtplib
#from time import strftime
#from types import *
import ConfigParser
import operator
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from optparse import OptionParser
import jira_utils
class IssueClass:
    """Plain record holding the fields tracked for one Jira issue."""
    def __init__(self):
        self.assignee = ""          # assignee user name
        self.assignee_email = ""    # assignee email address
        self.icdt = "" # Issue create date time (datetime once set)
        self.issue_id = ""          # Jira issue key
        self.issue_parent = ""      # parent issue key (Sub-tasks only)
        self.issue_type = ""        # e.g. "New Feature" or "Sub-task"
        self.sprint = ""            # current sprint/milestone value
        self.stalled = False        # True once the issue exceeds its status limit
        self.status = ""            # current workflow status name
        self.summary = ""           # issue summary line
        self.time_in_status = ""    # days in current status (int once computed)
        self.subtasks = {}          # issue key -> IssueClass of child Sub-tasks
class JiraAlert:
    def __init__(self):
        """Initialise empty containers and defaults for one alert run."""
        self.config = "" # for ConfigParser
        self.current_status = {} # Dictionary of current status, days_in_status = 0
        self.issues = []            # raw issue dicts fetched from Jira
        self.issue_objs = {} # Dictionary to hold IssueClass objects
        self.issues_over_limit = {} # Issues that have been in status too long
        self.issues_types = ['New Feature', 'Sub-task'] # Issue types we will check status of (.ini config file sections)
        self.log_offset = 0 # How old in days the last log file is, added to each status days
        self.nf_objs = {}           # New Feature objects (not populated in this chunk)
        self.output_file = ""       # path of the report file, if any
        self.search_step = 50       # Jira search page size
        self.stalled_nf_issues = [] # New Features flagged as stalled
        self.stalled_st_issues = [] # Sub-tasks flagged as stalled
        self.time_in_status = {} # Status times for issues
        # for issue_type in self.issues_types:
    def create_issue_objs(self):
        """ Gets issues from self.issues[], make IssueClass objects, put in self.issue_obj dictionary.
            Populate objects with data from their issue.
            calls get_time_in_status() to get some info.
        """
        print "Creating IssueClass objects"
        # Create IssueClass objects, add to issue_objs dictionary
        for issue in self.issues:
            # print json.dumps(issue, indent=4)
            if issue['fields']['issuetype']['name'] == "Sub-task" and issue['fields']['parent']['fields']['issuetype']['name'] != "New Feature":
                continue # Skip sub-tasks whose parents are not New features
            ic = IssueClass() # Create IssueClass object for each issue, assign data from issue to object's variables
            ic.assignee = issue['fields']['assignee']['name']
            ic.assignee_email = issue['fields']['assignee']['emailAddress']
            ic.issue_id = issue['key']
            ic.issue_type = issue['fields']['issuetype']['name']
            ic.summary = issue['fields']['summary']
            ic.status = issue['fields']['status']['name']
            self.issue_objs[issue['key']] = ic # Add object to main object dictionary
            if ic.issue_type == "Sub-task":
                ic.issue_parent = issue['fields']['parent']['key'] # Get Sub-task parent
            try:
                # NOTE(review): customfield_10264 presumably holds the sprint/
                # milestone value -- confirm against this Jira's field config.
                ic.sprint = issue['fields']['customfield_10264'][0]['value'] # Get current sprint
            except TypeError:
                pass # Some issues have no sprint
            # Brand new issues less than change_period with no changes yet are considered a "change of status".
            ic.icdt = dt.strptime(issue['fields']['created'].split('.')[0], "%Y-%m-%dT%H:%M:%S") # Item create datetime
            if (issue['fields']['issuetype']['name'] == "New Feature") and \
                ic.icdt.date() > date.today()-timedelta(days=int(self.config.get('default', 'change_period'))):
                ic.last_sprint = "" # Only objects with a last_sprint or last_status attribute will be checked for changes within change_period
                ic.last_status = "" # Set last_sprint and last_status to null for issues less than change_period old
            # Get time in status for the issues we're interested in, also updates sprint/last_sprint, status/last_status
            self.get_time_in_status(issue, ic.status)
    def get_stalled_issues(self):
        """ Goes thru self.issue_objs, applies limits, updates self.stalled_issues """
        print "Getting stalled issues"
        # Mark issues stalled if over limit.  Limits come from the .ini
        # config: section = issue type, option = status name, value = days.
        for v in self.issue_objs.values():
            if (v.issue_type == "New Feature") and (v.status not in ["Open", "In Progress", "Reopened", "Resolved", "Closed"]):
                if int(v.time_in_status) > int(self.config.get(v.issue_type, v.status)):
                    v.stalled = True
                    for st in v.subtasks.values():
                        # NOTE(review): an "In Progress" sub-task under the limit
                        # un-stalls the parent again -- the last matching subtask
                        # in iteration order decides; confirm this is intended.
                        if (st.status == "In Progress") and (st.time_in_status > int(self.config.get(v.issue_type, v.status))):
                            st.stalled = True
                        elif (st.status == "In Progress") and (st.time_in_status < int(self.config.get(v.issue_type, v.status))):
                            v.stalled = False
                            st.stalled = False
        # Put stalled issues in list
        self.stalled_nf_issues = sorted([obj for obj in self.issue_objs.values() if ((obj.issue_type == "New Feature") and obj.stalled)], key=operator.attrgetter('status')) # Stalled New Features
        self.stalled_st_issues = sorted([st for obj in self.stalled_nf_issues if len(obj.subtasks) for st in obj.subtasks.values() if st.stalled], key=operator.attrgetter('status')) # Stalled subtasks
    def get_time_in_status(self, issue, state):
        """ Calculate the time in status for this issue from the changelog history.
            Go thru changelog histories looking for this status.
            Stores days in status on the matching IssueClass object
            (self.issue_objs[issue['key']].time_in_status).
            While we're going thru changelog histories we can also find New Features last sprint and last status.
        """
        # current_datetime = str(dt.now()).split('.')[0].replace(' ', 'T') # The current datetime
        # Only two cases are of interest: in-progress Sub-tasks, and New
        # Features in any non-standard status.
        if ((issue['fields']['issuetype']['name'] == "Sub-task") and (issue['fields']['status']['name'] == "In Progress")) or \
            ((issue['fields']['issuetype']['name'] == "New Feature") and (issue['fields']['status']['name'] not in ["Open", "In Progress", "Reopened", "Resolved", "Closed"])):
            print "Getting time in status for %s" % issue['fields']['status']['name']
            # At first assume state is first, last and only state in case no changelog, if there's a changelog we'll revise later.
            # First state create time same as issue created datetime
            cicdt = dt.strptime(issue['fields']['created'].split('.')[0], "%Y-%m-%dT%H:%M:%S") # Current item create datetime
            nicd = dt.strptime(str(dt.now()).split('.')[0].replace(' ', 'T'), "%Y-%m-%dT%H:%M:%S") # Next item create datetime
            diff = nicd - cicdt # Time the issue has been in that state
            # If there's a changelog history go thru it else use issue create date and current date for time in status
            # If there's no changelog/histories then we're still in first state, nothing has changed
            if not len(issue['changelog']['histories']):
                print "no changelog for %s" % issue['key']
                self.issue_objs[issue['key']].time_in_status = diff.days
            else:
                # If there is a changelog look for the value "status", changes may be something other than status
                print "There's a changelog for issue %s" % issue['key']
                # print json.dumps(issue, indent=4)
                # Go thru changelog histories to see if any of the changes are "status" for this state, also get last status or sprint if New Feature
                status_change_found = False
                for histories_item in issue['changelog']['histories']:
                    if ((histories_item['items'][0]['field'] == 'status') and (histories_item['items'][0]['toString'] == state)):
                        print "There's a status change"
                        status_change_found = True
                    # Get oldest sprint and status within config.change_period days ago for New Features
                    if (issue['fields']['issuetype']['name'] == "New Feature") and \
                        ((histories_item['items'][0]['field'] == 'Milestone(s)') or (histories_item['items'][0]['field'] == 'status')):
                        hicdt = dt.strptime(histories_item['created'].split('.')[0], "%Y-%m-%dT%H:%M:%S").date() # histories item create datetime
                        if hicdt < date.today()-timedelta(days=int(self.config.get('default', 'change_period'))):
                            print "change too old %s, getting next histories item" % hicdt
                            continue # If older than config.change_period days then we don't care, get next histories item
                        else:
                            print "recent change %s " % hicdt
                            if histories_item['items'][0]['field'] == 'Milestone(s)':
                                self.issue_objs[issue['key']].last_sprint = histories_item['items'][0]['fromString'] # last sprint if issue less that config.change_period old
                                print "last_sprint %s" % self.issue_objs[issue['key']].last_sprint
                                break # Got the oldest change within change_period
                            if histories_item['items'][0]['field'] == 'status':
                                self.issue_objs[issue['key']].last_status = histories_item['items'][0]['fromString'] # last status if issue less that config.change_period old
                                print "last_status %s" % self.issue_objs[issue['key']].last_status
                                break # Got the oldest change within change_period
                # If there was a change in status calculate time in status from changelog
                if(status_change_found):
                    total_days = 0 # Accumulate all the diffs for each time in this state
                    # Go thru changelog histories to see if this issue was in this state before.
                    for i, histories_item in enumerate(issue['changelog']['histories']):
                        # All the different kinds of things that have changed, we're only interested in 'status'
                        # Display all the changelog history items
                        if (0):  # debug dump, disabled
                            for k, v in histories_item.items():
                                print "issue['changelog']['histories'] key: %s value: %s" % (k, v)
                            print "issue['changelog']['histories']['items'] %s" % histories_item['items']
                            for item in histories_item['items']:
                                print "histories_item['items'] %s" % item
                            print "histories_item['items'][0] %s" %histories_item['items'][0]
                            for item2 in histories_item['items'][0]:
                                print "histories_item['items'][0] %s %s" % (item2, histories_item['items'][0][item2])
                        # If status record we're interested in is found, look for next status change or use current date for end date
                        # If status "Pending" for New Features or "Open" for Sub-tasks there is no change entry for that, it's first state, use created date.
                        if ((histories_item['items'][0]['field'] == 'status') and (histories_item['items'][0]['toString'] == state)):
                            current_status_create_datetime = histories_item['created'].split('.')[0]
                            # Go thru future history changes until the next status change
                            status_found = False
                            for j in range(i+1, len(issue['changelog']['histories'])):
                                if (issue['changelog']['histories'][j]['items'][0]['field'] == 'status'):
                                    next_item_create_date = issue['changelog']['histories'][j]['created'].split('.')[0] # Only next status change not any change
                                    print "%s. next histories item created: %s" % (j, next_item_create_date)
                                    status_found = True
                                    break # Break the for loop, next status found
                            if not status_found:
                                next_item_create_date = str(dt.now()).split('.')[0].replace(' ', 'T') # The current datetime
                                print "No next histories item, using today's date: %s" % (next_item_create_date)
                            cscdt = dt.strptime(current_status_create_datetime, "%Y-%m-%dT%H:%M:%S") # Current status create datetime
                            nicdt = dt.strptime(next_item_create_date, "%Y-%m-%dT%H:%M:%S") # Next item create datetime
                            print "Current status create date for %s: %s" % (issue['key'], cscdt)
                            print "Next item create date for %s: %s" % (issue['key'], nicdt)
                            diff = nicdt - cscdt # The number of days the issue has been in that state
                            total_days += diff.days # Accumulate all the times this issue has been in this state.
                    self.issue_objs[issue['key']].time_in_status = total_days
                else:
                    self.issue_objs[issue['key']].time_in_status = diff.days # No change in status, days in status is issue created date to now
def make_nfs_changed_rows(self, info_type):
    """ Make html table for email from a list of IssueClass objects.
    The table is all the New Features that have changed sprint or status since self.change_period.

    info_type selects which change to report: 'sprint' or 'status'.
    Returns the accumulated html rows, or "" when nothing changed.
    """
    html_table = ""
    # Put data in html table rows
    for v in sorted(self.issue_objs.values(), key=operator.attrgetter('issue_id')):
        do_the_rest = False
        # New Features only that have changed sprint or status
        if (v.issue_type == "New Feature"):
            if (info_type == 'sprint') and hasattr(v, 'last_sprint') and (v.sprint != v.last_sprint):
                print("sprint %s not equal to last sprint %s" % (v.sprint, v.last_sprint))
                html_table += '<tr><td nowrap>New Feature changed from \"%s\" to \"%s\"</td>' % (v.last_sprint, v.sprint)
                do_the_rest = True
            elif (info_type == 'status') and hasattr(v, 'last_status') and (v.status != v.last_status):
                print("status %s not equal to last status %s" % (v.status, v.last_status))
                html_table += '<tr><td nowrap>New Feature changed from \"%s\" to \"%s\"</td>' % (v.last_status, v.status)
                do_the_rest = True
            if do_the_rest:
                html_table += '<td nowrap>| %s |</td>' % v.assignee  # | is vertical bar in html
                if '&' in v.summary:
                    # BUG FIX: the replacement text was a literal '&' (a no-op);
                    # escape ampersands so they don't break the generated html.
                    v.summary = v.summary.replace('&', '&amp;')
                html_table += '<td nowrap><a href=\"http://jira.sandforce.com/browse/%s\">%s</a></td>' % (v.issue_id, v.issue_id)
                html_table += '<td nowrap>%s</td></tr>' % v.summary
    if html_table:
        html_table += '<tr><td nowrap> </td></tr>'  # blank line at end of table
    return html_table
def make_time_in_status_rows(self, obj_list):
    """ Make html table rows for email from a list of IssueClass objects.

    Each row: "<type> in <status> status for <n> days | assignee | link | summary".
    Always appends a trailing blank row, even for an empty obj_list.
    """
    html_table = ""
    # Put the data in html table rows
    for item in obj_list:
        html_table += '<tr><td nowrap>%s in \"%s\" status for %s days</td>' % (item.issue_type, item.status, item.time_in_status)
        html_table += '<td nowrap>| %s |</td>' % item.assignee  # | is vertical bar in html
        if '&' in item.summary:
            # BUG FIX: the replacement text was a literal '&' (a no-op);
            # escape ampersands so they don't break the generated html.
            item.summary = item.summary.replace('&', '&amp;')
        html_table += '<td nowrap><a href=\"http://jira.sandforce.com/browse/%s\">%s</a></td>' % (item.issue_id, item.issue_id)
        html_table += '<td nowrap>%s</td></tr>' % item.summary
    html_table += '<tr><td nowrap> </td></tr>'  # blank line at end of table
    return html_table
def put_subtask_in_parent(self):
    """ Put subtask objects in their parent object.

    Moves every Sub-task out of self.issue_objs and files it in its parent
    object's .subtasks dict, keyed by the subtask's issue id.  Exits the
    whole program if a subtask's parent is not present in self.issue_objs.
    """
    print "Putting subtasks in parents"
    # NOTE: under Python 2, dict.values() returns a list snapshot, so deleting
    # entries from self.issue_objs inside this loop is safe; a Python 3 port
    # would need list(self.issue_objs.values()) here.
    for obj in self.issue_objs.values():
        if obj.issue_type == "Sub-task":
            try:
                # self.issue_objs[obj.issue_id] is presumably obj itself (the
                # dict appears keyed by issue id); look up its parent's object
                # and attach the subtask there.
                self.issue_objs[self.issue_objs[obj.issue_id].issue_parent].subtasks[obj.issue_id] = obj
                del self.issue_objs[obj.issue_id]  # Delete subtask object after putting in parent.
            except KeyError:
                print "Can't find parent issue for subtask %s" % obj.issue_id
                sys.exit(1)
# def send_assignee_emails(self, recipients):
def send_assignee_emails(self):
    """ Just make the email body (html table) and pass to send_email()
    This sends a separate email to each assignee with their issues only.

    Collects each assignee's stalled New Features and Sub-tasks into one
    table and mails it with the assignee on To: (see send_email).
    """
    assignees = list(set([obj.assignee for obj in self.stalled_nf_issues]))  # Assignees from New Features
    assignees.extend(list(set([obj.assignee for obj in self.stalled_st_issues])))  # Add assignees from Sub-tasks
    # NOTE(review): an assignee present in both lists appears in `assignees`
    # twice and would be emailed twice -- confirm whether that is intended.
    recipients = self.config.get("recipients", "emails").split("\n")  # [recipients] section in .ini file
    for assignee in assignees:
        assignee_issues = []  # List of IssueClass objects
        # Get all stalled New feature issues for this assignee
        for item in self.stalled_nf_issues + self.stalled_st_issues:
            if item.assignee == assignee:
                # if item.assignee == "ashih":
                assignee_issues.append(item)
                assignee_email = item.assignee_email
        if len(assignee_issues):
            html_table = '<table style="font-size:12px">'
            html_table += self.make_time_in_status_rows(assignee_issues)
            html_table += '</table>'  # Closing table tag
            #recipients.append(assignee_email)
            print "Sending email to: %s" % recipients
            self.send_email(recipients, html_table, assignee)
def send_email(self, recipients, html_data, assignee=None):
    """ Put html_data in the body of an html email and send it to recipients
    recipients is a list

    html_data: pre-built html markup that becomes the <body> content.
    assignee:  when given, goes on the To: header and the recipients list is
               carried on Cc:; otherwise recipients go on To:.
    Sends through the local SMTP server with debug output enabled.
    """
    msg = MIMEMultipart('alternative')
    # msg['Subject'] = "Jira Alert - Stagnant Jiras %s" % self.options.fl_project
    msg['Subject'] = "Jira Alert - Stagnant Jiras"
    msg['From'] = 'jira.alert@lsi.com'
    if assignee:
        msg['To'] = assignee
        msg['Cc'] = ', '.join(recipients)  # Assignee emails
    else:
        msg['To'] = ', '.join(recipients)  # Main email
    # Wrap the table markup in a minimal html document.
    html1 = "<!DOCTYPE html><html><head><meta charset=\"utf-8\"/><title>HTML Reference</title></head><body>"
    html2 = "</body></html>"
    final_message = "%s%s%s" % (html1, html_data, html2)
    html_message = MIMEText(final_message, 'html', _charset='utf-8')
    msg.attach(html_message)
    # Send the message via our own SMTP server.
    # NOTE(review): the SMTP envelope below is only `recipients`; when
    # `assignee` is set it appears in the To: header but is NOT in the
    # envelope, so the assignee may never actually receive the mail -- confirm.
    s = smtplib.SMTP('localhost')
    s.set_debuglevel(1)
    # s.sendmail('richard.leblanc@lsi.com', recipients, msg.as_string())
    s.sendmail('jira.alert@lsi.com', recipients, msg.as_string())
    s.quit()
# def send_main_email(self, recipients):
def send_main_email(self):
    """ Just make the email body (html) and pass to send_email()
    This is the main email that contains issues for all assignees.

    Concatenates: sprint changes, status changes, stalled New Features and
    stalled Sub-tasks into one table and mails it to the configured list.
    """
    print "Sending main email"
    # Make an html table to be body of email
    html_table = '<table style="font-size:12px">'
    html_table += self.make_nfs_changed_rows("sprint")  # New features only
    html_table += self.make_nfs_changed_rows("status")  # New features only
    html_table += self.make_time_in_status_rows(self.stalled_nf_issues)
    html_table += self.make_time_in_status_rows(self.stalled_st_issues)  # Sub-tasks
    html_table += '</table>'  # Closing table tag
    recipients = self.config.get("recipients", "emails").split("\n")  # [recipients] section in .ini file
    # emails = self.config.items('recipients')
    # for key, email in emails:
    # recipients = ', '.join(self.config.items('recipients'))
    print recipients
    # sys.exit()
    self.send_email(recipients, html_table)
def main(argv=None):
if argv is None:
argv = sys.argv
# usage = "usage: %prog --fl_project=<fl project name>\n ex. %prog --fl_project=\"Griffin MP1\""
ja = JiraAlert() # The one and only JiraAlert object
# ja.parser = OptionParser()
# ja.parser.add_option("--fl_project", action="store", type="string", dest="fl_project", help="FL project to get issues for", metavar="FL_PROJECT")
# ja.parser.set_usage(usage)
# (options, args) = ja.parser.parse_args()
# if options.fl_project == None:
# ja.parser.error("incorrect number of arguments")
# ja.parser.print_usage()
# else:
# ja.options = options
ja.config = ConfigParser.SafeConfigParser() # Make a config parser to parse the jira_alert.ini file
ja.config.optionxform=str # To preserve case of values
try:
if ja.config.read('jira_alert.ini') != []:
pass
else:
raise IOError('Cannot open configuration file')
except ConfigParser.ParsingError, error:
print 'Error,', error
# The jira query that will get the issues we're interested in.
jql = '(("FL Project" = "G.1.0") and ("project" = "TITAN") and \
(issuetype = "New Feature") and (status != "Open" or status != "In Progress" or status != "Reopened" or status != "Resolved" or status != "Closed") or \
(issuetype = Sub-task and status = "In Progress"))'
# (issuetype = Sub-task and status = "In Progress"))' % self.options.fl_project
print "jql: %s" % jql
# temp_issue = jira_utils.get_issue("TEST-114")
# print json.dumps(temp_issue, indent=4)
# sys.exit()
ja.issues = list(jira_utils.get_issues(jql)) # Gets issues using the jgl query (turn returned json into python list)
print "Number of issues: %s" % len(ja.issues)
ja.create_issue_objs() # Create IssueClass objects for each issue, also gets changes and time in status
ja.put_subtask_in_parent()
ja.get_stalled_issues()
ja.send_main_email() # Email complete list of latest issue status times
# ja.send_assignee_emails() # Send email to assignees about their issues
if __name__ == "__main__":
sys.exit(main())
|
10,138 | 1b4bd28e41f14829f6cb760541c9114becf4863f |
# Rewrite the Krem recall table so its rows follow the order (and header) of
# the NoBP recall table, matching rows by the process name in column 0.
# NOTE: raw strings (r"...") keep the Windows backslashes literal and safe.
with open(r"D:\DanielVIB\Maize\MoreDataSets\PearsonNetworks\ConsolidateCut\ProcessesRecallNoBP.txt", "r") as fileSAC, \
     open(r"D:\DanielVIB\Maize\MoreDataSets\PearsonNetworks\ConsolidateCut\ProcessesRecallKrem.txt", "r") as fileKrem, \
     open(r"D:\DanielVIB\Maize\MoreDataSets\PearsonNetworks\ConsolidateCut\ProcessesRecallKremNoBP.txt", "w") as outfile:
    # Load the Krem file into a dict keyed by process name; skip its header.
    next(fileKrem)
    kremRecords = {}
    for line in fileKrem:
        proc = line.split("\t")[0]
        kremRecords[proc] = line
    # Iterate the SAC file to emit the Krem rows in SAC order; copy header.
    outfile.write(next(fileSAC))
    for line in fileSAC:
        proc = line.split("\t")[0]
        lineKrem = kremRecords.get(proc)
        # BUG FIX: a process missing from the Krem file left lineKrem as None
        # and write(None) raised TypeError; skip such rows instead.
        if lineKrem is not None:
            outfile.write(lineKrem)
|
10,139 | 4ebee1de80f17cbfef5a335124fd85c762adf5cf | from postprocess import dynamic_audio_normalize
from _shutil import *
# Normalize the loudness of the first selected file.
f = get_files()[0]  # get_files() comes from the star-import of _shutil
dynamic_audio_normalize(f)
|
10,140 | 20c22db1d9ac8c1f01fe5e64609396c07518bc08 | from copy import deepcopy
from django.contrib.auth.hashers import make_password
from django.core.mail import send_mail
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.contrib.auth import login, update_session_auth_hash
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib import messages
from django.contrib.sites.shortcuts import get_current_site
from django.template.loader import render_to_string
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.encoding import force_bytes, force_text
from django.http import HttpResponse
from core.services.client_service import request_post, request_get
from security.auth.api_urls import GET_USER_BY_EMAIL
from security.auth.token_generator import account_activation_token
from .models import User
from security.auth.serializers import UserSerializer
from security.forms.register_form import RegisterForm
from security.forms.details_form import DetailsForm
from security.auth import api_urls as api
import json
def register_view(request):
    """Handle user sign-up: create the user and email an activation link.

    GET renders an empty RegisterForm; a valid POST saves the user, sends the
    activation mail and returns a confirmation page.  An invalid POST falls
    through and re-renders the bound form with its errors.
    """
    if request.method == 'POST':
        form = RegisterForm(request.POST)
        if form.is_valid():
            # form.save() already persists the user; the original issued a
            # redundant second user.save() (an extra identical DB write).
            user = form.save()
            email = form.cleaned_data.get('email')
            _send_activation_mail(email, user, get_current_site(request))
            return HttpResponse('Please confirm your email address to complete the registration')  # TODO SUCCESS REGISTER EMAIL
    else:
        form = RegisterForm()
    return render(request, 'registration/register.html', {'form': form})
@login_required
def change_password(request):
    """Change the password locally and propagate the change to the remote API.

    On a valid POST: save the new password, keep the session authenticated,
    then POST the old/new password hashes to the remote CHANGE_PASSWORD
    endpoint.  A non-200 API response redirects to the error page.
    """
    if request.method == 'POST':
        form = PasswordChangeForm(request.user, request.POST)
        if form.is_valid():
            # Capture the current hash BEFORE form.save() overwrites it on
            # request.user.  Strings are immutable, so a plain reference
            # suffices -- the original deepcopy() was unnecessary.
            old_password = request.user.password
            user = form.save()
            update_session_auth_hash(request, user)
            pass_updater = {
                "login": request.user.email,
                "password": old_password,
                "UPDATE": {
                    "oldPassword": old_password,
                    "newPassword": user.password
                }
            }
            response = request_post(api.CHANGE_PASSWORD, data=pass_updater)
            if response.status_code != 200:
                return redirect('error404')
            messages.success(request, "Password changed")
            return redirect('user_ads')
        else:
            messages.error(request, 'Please correct the error below.')
    else:
        form = PasswordChangeForm(request.user)
    return render(request, 'registration/change_password.html', {'form': form})
@login_required
def show_details(request):
    """Render the account details page for the logged-in user."""
    return render(request, 'details.html')
@login_required
def edit_details(request):
    """Edit the logged-in user's details, but only when local and remote agree.

    POST: validate and save the DetailsForm, then redirect to the details
    page; an invalid form falls through and is re-rendered with errors.
    GET: fetch the user record from the remote API and only offer the form
    when the remote 'name' matches the local one; otherwise form is None
    (presumably the template disables editing in that case -- confirm).
    """
    if request.method == 'POST':
        form = DetailsForm(request.POST, instance=request.user)
        if form.is_valid():
            form.save()
            return redirect('show_details')
    else:
        response = request_get(GET_USER_BY_EMAIL+request.user.email)
        if response.status_code == 200:
            name_db = response.json().get('name', None)
            if name_db == request.user.name:
                form = DetailsForm(instance=request.user)
            else:
                form = None  # remote name differs from the local copy
        else:
            form = None  # remote lookup failed
    return render(request, 'edit_details.html', {'form': form})
def activate(request, uidb64, token):
    """Account-activation endpoint hit from the emailed link.

    Decodes the base64 user id and verifies the activation token.  On
    success: marks the user active, registers the user with the remote API
    (storing the id the server assigns), logs the user in and confirms.
    Any failure renders a plain text response instead.
    """
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = User.objects.get(pk=uid)
    except(TypeError, ValueError, OverflowError, User.DoesNotExist):
        user = None
    if user is not None and account_activation_token.check_token(user, token):
        user.is_active = True
        user.save()
        serializer = UserSerializer(user)
        response = request_post(api.REGISTER_USER, data=serializer.data)
        if response is None:
            return HttpResponse("Error occured")  # TODO
        data = json.loads(response.text)
        server_id = data.get('id', None)
        if server_id is None:
            return HttpResponse("Server error")  # TODO
        # Remember the id the remote service assigned to this user.
        user.server_id = server_id
        user.save()
        login(request, user)
        return HttpResponse('Thank you for your email confirmation. Now you can login your account.')
    else:
        return HttpResponse('Activation link is invalid!')
def _send_activation_mail(email: str, user: User, current_site):
    """Render the activation email (uid + token link) and send it to *email*.

    The template receives the site domain plus the base64 user id and the
    activation token that the activate() view later verifies.
    """
    message = render_to_string('registration/email_acc.html', {
        'user': user,
        'domain': current_site.domain,
        'uid': urlsafe_base64_encode(force_bytes(user.pk)),
        'token': account_activation_token.make_token(user),
    })
    send_mail(
        'Account Activation',
        message,
        'addservice@op.pl',
        [email],
        fail_silently=False,
    )
|
10,141 | f8cf01e2f554205b8a1a9b085c98b46d791ca3b3 | """backport module"""
import os
import shutil
import re
class Backport:
    """A simple class for transforming Python source files.

    Workflow: back up every *.py file in a directory to *.py.orig, restore
    pristine sources from the backups, then apply the regex REPLACEMENTS
    (meant to be defined by subclasses) to each source file.
    """

    DEFAULT_PATH = "."
    PYTHON_EXTENSION = "py"
    BACKUP_EXTENSION = "orig"
    # {old_regex: replacement}; each pattern is applied with re.MULTILINE.
    REPLACEMENTS = {}
    # BUG FIX: os.listdir() yields base names, while __file__ may carry a
    # directory prefix -- compare against the base name so this module is
    # actually skipped.
    IGNORE = (os.path.basename(__file__),)

    def __init__(self, path=DEFAULT_PATH, filenames=None):
        if filenames is None:
            filenames = self.get_filenames(path)
        self.filenames = filenames

    @classmethod
    def get_filenames(cls, path):
        """Return the *.py and *.py.orig entries of *path*, minus IGNORE."""
        filenames = os.listdir(path)
        for filename in filenames[:]:  # iterate a copy; we mutate the list
            if filename in cls.IGNORE:
                filenames.remove(filename)
                continue
            try:
                name, ext = filename.rsplit(".", 1)
            except ValueError:  # no extension at all
                filenames.remove(filename)
                continue
            if ext == cls.BACKUP_EXTENSION:
                # Keep only backups of plain python files ("x.py.orig").
                if not name.endswith("." + cls.PYTHON_EXTENSION):
                    filenames.remove(filename)
                elif name.count(".") > 1:
                    filenames.remove(filename)
            elif "." in name:  # multi-dot names like "a.b.py" are skipped
                filenames.remove(filename)
            elif ext != cls.PYTHON_EXTENSION:
                filenames.remove(filename)
        return filenames

    def backup(self):
        """Generate a backup file for each file."""
        for filename in self.filenames[:]:
            if not filename.endswith("." + self.PYTHON_EXTENSION):
                continue
            origfilename = filename + "." + self.BACKUP_EXTENSION
            if origfilename not in self.filenames:
                shutil.copy(filename, origfilename)
                self.filenames.append(origfilename)

    def restore(self, clean=False):
        """Restore the original files.

        If clean is True, wipe out the backup files.
        """
        suffix = "." + self.BACKUP_EXTENSION
        for origfilename in self.filenames[:]:
            if not origfilename.endswith(suffix):
                continue
            # BUG FIX: str.strip() removes *characters* ('.', 'o', 'r', 'i',
            # 'g') from both ends -- it can mangle the real name.  Slice the
            # suffix off instead.
            filename = origfilename[:-len(suffix)]
            shutil.copy(origfilename, filename)
            self.filenames.append(filename)
            if clean:
                os.remove(origfilename)

    def transform(self, source):
        """Apply every REPLACEMENTS pattern to *source* and return it."""
        for old, new in self.REPLACEMENTS.items():
            source = re.sub("(?m)" + old, new, source)
        return source

    def run(self, dryrun=True):
        """Back up, restore pristine sources, then transform each file.

        With dryrun=True (the default) the transformed text is only printed.
        """
        self.backup()
        self.restore()
        for filename in self.filenames:
            if not filename.endswith(self.PYTHON_EXTENSION):
                continue
            # BUG FIX: close files deterministically (the original leaked
            # open file objects until garbage collection).
            with open(filename) as infile:
                source = infile.read()
            source = self.transform(source)
            if __debug__:
                print("")
                print(filename + "%%" * 50)
                print(source)
            if not dryrun:
                with open(filename, "w") as outfile:
                    outfile.write(source)
|
10,142 | 6b27564c0870f6ef341dcc425d6af2ed6fc726f3 | # Copyright (c) 2021 by Cisco Systems, Inc.
# All rights reserved.
# Expected parsed structure for an EVPN show-command test case, keyed
# evi -> bd_id -> eth_tag; the leaf holds remote/local/duplicate entry
# counts as reported by the device (the exact show command is defined by
# the parser under test, not visible here).
expected_output = {
    'evi': {
        1: {
            'bd_id': {
                11: {
                    'eth_tag': {
                        0: {
                            'remote_count': 4,
                            'local_count': 5,
                            'dup_count': 1,
                        },
                    },
                },
            },
        },
    },
}
|
10,143 | bbaeb7b13e50e14fe29dab5846310e43b6f73e83 | '''
Individual stages of the pipeline implemented as functions from
input files to output files.
The run_stage function knows everything about submitting jobs and, given
the state parameter, has full access to the state of the pipeline, such
as config, options, DRMAA and the logger.
'''
from utils import safe_make_dir
from runner import run_stage
import os
# PICARD_JAR = '$PICARD_HOME/lib/picard-1.69.jar'
PICARD_JAR = '/vlsci/VR0002/kmahmood/Programs/picard/picard-tools-2.0.1/picard.jar'
GATK_JAR = '$GATK_HOME/GenomeAnalysisTK.jar'
def java_command(jar_path, mem_in_gb, command_args):
    '''Build a string for running a java command.

    Leaves ~2 GB of headroom between the requested allocation and Java's max
    heap, to cover non-heap JVM memory such as stack space.
    '''
    heap_gb = mem_in_gb - 2
    return 'java -Xmx{mem}g -jar {jar_path} {command_args}'.format(
        mem=heap_gb, jar_path=jar_path, command_args=command_args)
def run_java(state, stage, jar_path, mem, args):
    '''Build the java command line for *jar_path* and submit it via run_stage.'''
    command = java_command(jar_path, mem, args)
    run_stage(state, stage, command)
class Stages(object):
    '''One method per pipeline stage; builds and submits Picard/GATK/bwa
    command lines.  All resource paths (reference, known-sites files,
    intervals) are resolved once from the pipeline config in __init__.'''

    def __init__(self, state):
        self.state = state
        self.reference = self.get_options('ref_hg19')
        self.dbsnp_hg19 = self.get_options('dbsnp_hg19')
        self.mills_hg19 = self.get_options('mills_hg19')
        self.one_k_g_snps = self.get_options('one_k_g_snps')
        self.one_k_g_indels = self.get_options('one_k_g_indels')
        self.one_k_g_highconf_snps = self.get_options('one_k_g_highconf_snps')
        self.hapmap = self.get_options('hapmap')
        self.interval_hg19 = self.get_options('exome_bed_hg19')
        self.CEU_mergeGvcf = self.get_options('CEU_mergeGvcf')

    def run_picard(self, stage, args):
        '''Run a Picard tool with the stage's configured memory limit.'''
        mem = int(self.state.config.get_stage_options(stage, 'mem'))
        return run_java(self.state, stage, PICARD_JAR, mem, args)

    def run_gatk(self, stage, args):
        '''Run a GATK walker with the stage's configured memory limit.'''
        mem = int(self.state.config.get_stage_options(stage, 'mem'))
        return run_java(self.state, stage, GATK_JAR, mem, args)

    def get_stage_options(self, stage, *options):
        return self.state.config.get_stage_options(stage, *options)

    def get_options(self, *options):
        return self.state.config.get_options(*options)

    def original_fastqs(self, output):
        '''Original fastq files (pipeline entry point; nothing to run).'''
        pass

    def align_bwa(self, inputs, bam_out, read_id, lib, lane, sample_id):
        '''Align the paired end fastq files to the reference genome using bwa'''
        fastq_read1_in, fastq_read2_in = inputs
        cores = self.get_stage_options('align_bwa', 'cores')
        safe_make_dir('alignments/{sample}'.format(sample=sample_id))
        # NOTE(review): 'lib' is passed to format() but the template has no
        # {lib} placeholder, so the read group carries no LB tag -- confirm
        # whether that is intended.
        read_group = '"@RG\\tID:{readid}\\tSM:{sample}\\tPU:lib1\\tLN:{lane}\\tPL:Illumina"' \
            .format(readid=read_id, lib=lib, lane=lane, sample=sample_id)
        command = 'bwa mem -t {cores} -R {read_group} {reference} {fastq_read1} {fastq_read2} ' \
                  '| samtools view -b -h -o {bam} -' \
                  .format(cores=cores,
                          read_group=read_group,
                          fastq_read1=fastq_read1_in,
                          fastq_read2=fastq_read2_in,
                          reference=self.reference,
                          bam=bam_out)
        run_stage(self.state, 'align_bwa', command)

    def sort_bam_picard(self, bam_in, sorted_bam_out):
        '''Sort the BAM file using Picard'''
        picard_args = 'SortSam INPUT={bam_in} OUTPUT={sorted_bam_out} ' \
                      'VALIDATION_STRINGENCY=LENIENT SORT_ORDER=coordinate ' \
                      'MAX_RECORDS_IN_RAM=5000000 CREATE_INDEX=True'.format(
                          bam_in=bam_in, sorted_bam_out=sorted_bam_out)
        self.run_picard('sort_bam_picard', picard_args)

    def mark_duplicates_picard(self, bam_in, outputs):
        '''Mark duplicate reads using Picard'''
        dedup_bam_out, metrics_out = outputs
        picard_args = 'MarkDuplicates INPUT={bam_in} OUTPUT={dedup_bam_out} ' \
                      'METRICS_FILE={metrics_out} VALIDATION_STRINGENCY=LENIENT ' \
                      'MAX_RECORDS_IN_RAM=5000000 ASSUME_SORTED=True ' \
                      'CREATE_INDEX=True'.format(bam_in=bam_in, dedup_bam_out=dedup_bam_out,
                                                 metrics_out=metrics_out)
        self.run_picard('mark_duplicates_picard', picard_args)

    def realigner_target_creator(self, inputs, intervals_out):
        '''Generate chromosome intervals using GATK'''
        bam_in, _metrics_dup = inputs
        cores = self.get_stage_options('chrom_intervals_gatk', 'cores')
        # BUG FIX: '--known {one_k_g_indels}' was passed twice; pass it once.
        gatk_args = '-T RealignerTargetCreator -R {reference} -I {bam} ' \
                    '--num_threads {threads} --known {mills_hg19} ' \
                    '--known {one_k_g_indels} ' \
                    '-o {out}'.format(reference=self.reference, bam=bam_in,
                                      threads=cores, mills_hg19=self.mills_hg19,
                                      one_k_g_indels=self.one_k_g_indels,
                                      out=intervals_out)
        self.run_gatk('chrom_intervals_gatk', gatk_args)

    def local_realignment_gatk(self, inputs, bam_out):
        '''Local realign reads using GATK'''
        target_intervals_in, bam_in = inputs
        gatk_args = "-T IndelRealigner -R {reference} -I {bam} " \
                    "-targetIntervals {target_intervals} -known {mills_hg19} " \
                    "-known {one_k_g_indels} " \
                    "-o {out}".format(reference=self.reference, bam=bam_in,
                                      mills_hg19=self.mills_hg19,
                                      one_k_g_indels=self.one_k_g_indels,
                                      target_intervals=target_intervals_in,
                                      out=bam_out)
        self.run_gatk('local_realignment_gatk', gatk_args)

    # XXX I'm not sure that --num_cpu_threads_per_data_thread has any benefit
    # here
    def base_recalibration_gatk(self, bam_in, outputs):
        '''Base recalibration using GATK'''
        csv_out, log_out = outputs
        gatk_args = "-T BaseRecalibrator -R {reference} -I {bam} " \
                    "--num_cpu_threads_per_data_thread 4 --knownSites {dbsnp_hg19} " \
                    "--knownSites {mills_hg19} --knownSites {one_k_g_indels} " \
                    "-log {log} -o {out}".format(reference=self.reference, bam=bam_in,
                                                 mills_hg19=self.mills_hg19, dbsnp_hg19=self.dbsnp_hg19,
                                                 one_k_g_indels=self.one_k_g_indels,
                                                 log=log_out, out=csv_out)
        self.run_gatk('base_recalibration_gatk', gatk_args)

    # XXX I'm not sure that --num_cpu_threads_per_data_thread has any benefit
    # here
    def print_reads_gatk(self, inputs, bam_out):
        '''Print reads using GATK'''
        [csv_in, _log], bam_in = inputs
        gatk_args = "-T PrintReads -R {reference} -I {bam} --BQSR {recal_csv} " \
                    "-o {out} --num_cpu_threads_per_data_thread 4".format(reference=self.reference,
                                                                          bam=bam_in, recal_csv=csv_in, out=bam_out)
        self.run_gatk('print_reads_gatk', gatk_args)

    # Merge per lane bam into a single bam per sample
    def merge_sample_bams(self, bam_files_in, bam_out):
        '''Merge per lane bam into a merged bam file'''
        bam_files = ' '.join(['INPUT=' + bam for bam in bam_files_in])
        picard_args = 'MergeSamFiles {bams_in} OUTPUT={merged_bam_out} ' \
                      'VALIDATION_STRINGENCY=LENIENT ' \
                      'MAX_RECORDS_IN_RAM=5000000 ASSUME_SORTED=True ' \
                      'CREATE_INDEX=True'.format(
                          bams_in=bam_files, merged_bam_out=bam_out)
        self.run_picard('merge_sample_bams', picard_args)

    def call_haplotypecaller_gatk(self, bam_in, vcf_out):
        '''Call variants using GATK (GVCF mode with full annotation set)'''
        gatk_args = "-T HaplotypeCaller -R {reference} --min_base_quality_score 20 " \
                    "--emitRefConfidence GVCF " \
                    "-A AlleleBalance -A AlleleBalanceBySample " \
                    "-A ChromosomeCounts -A ClippingRankSumTest " \
                    "-A Coverage -A DepthPerAlleleBySample " \
                    "-A DepthPerSampleHC -A FisherStrand " \
                    "-A GCContent -A GenotypeSummaries " \
                    "-A HardyWeinberg -A HomopolymerRun " \
                    "-A LikelihoodRankSumTest -A LowMQ " \
                    "-A MappingQualityRankSumTest -A MappingQualityZero " \
                    "-A QualByDepth " \
                    "-A RMSMappingQuality -A ReadPosRankSumTest " \
                    "-A SampleList -A SpanningDeletions " \
                    "-A StrandBiasBySample -A StrandOddsRatio " \
                    "-A TandemRepeatAnnotator -A VariantType " \
                    "-I {bam} -L {interval_list} -o {out}".format(reference=self.reference,
                                                                  bam=bam_in, interval_list=self.interval_hg19, out=vcf_out)
        self.run_gatk('call_haplotypecaller_gatk', gatk_args)

    def call_haplotypecaller_gatk_nct(self, bam_in, vcf_out):
        '''Call variants using GATK (multi-threaded, non-GVCF variant)'''
        gatk_args = "-T HaplotypeCaller -R {reference} --min_base_quality_score 20 " \
                    "--standard_min_confidence_threshold_for_calling 30.0 " \
                    "--num_cpu_threads_per_data_thread 4 " \
                    "--variant_index_type LINEAR " \
                    "--standard_min_confidence_threshold_for_emitting 30.0 " \
                    "-I {bam} -L {interval_list} -o {out}".format(reference=self.reference,
                                                                  bam=bam_in, interval_list=self.interval_hg19, out=vcf_out)
        self.run_gatk('call_haplotypecaller_gatk', gatk_args)

    def combine_gvcf_gatk(self, vcf_files_in, vcf_out):
        '''Combine G.VCF files for all samples using GATK'''
        g_vcf_files = ' '.join(['--variant ' + vcf for vcf in vcf_files_in])
        gatk_args = "-T CombineGVCFs -R {reference} " \
                    "--disable_auto_index_creation_and_locking_when_reading_rods " \
                    "{g_vcf_files} -o {vcf_out}".format(reference=self.reference,
                                                        g_vcf_files=g_vcf_files, vcf_out=vcf_out)
        self.run_gatk('combine_gvcf_gatk', gatk_args)

    def genotype_gvcf_gatk(self, merged_vcf_in, vcf_out):
        '''Genotype G.VCF files using GATK'''
        cores = self.get_stage_options('genotype_gvcf_gatk', 'cores')
        gatk_args = "-T GenotypeGVCFs -R {reference} " \
                    "--disable_auto_index_creation_and_locking_when_reading_rods " \
                    "-A AlleleBalance -A AlleleBalanceBySample " \
                    "-A ChromosomeCounts -A ClippingRankSumTest " \
                    "-A Coverage -A DepthPerAlleleBySample " \
                    "-A DepthPerSampleHC -A FisherStrand " \
                    "-A GCContent -A GenotypeSummaries " \
                    "-A HardyWeinberg -A HomopolymerRun " \
                    "-A LikelihoodRankSumTest " \
                    "-A MappingQualityRankSumTest -A MappingQualityZero " \
                    "-A QualByDepth " \
                    "-A RMSMappingQuality -A ReadPosRankSumTest " \
                    "-A SampleList -A SpanningDeletions " \
                    "-A StrandBiasBySample -A StrandOddsRatio " \
                    "-A TandemRepeatAnnotator -A VariantType " \
                    "--dbsnp {dbsnp} " \
                    "--num_threads {cores} --variant {merged_vcf} --out {vcf_out}" \
                    .format(reference=self.reference, dbsnp=self.dbsnp_hg19,
                            cores=cores, merged_vcf=merged_vcf_in, vcf_out=vcf_out)
        self.run_gatk('genotype_gvcf_gatk', gatk_args)

    def snp_recalibrate_gatk(self, genotype_vcf_in, outputs):
        '''SNP recalibration using GATK'''
        recal_snp_out, tranches_snp_out, snp_plots_r_out = outputs
        cores = self.get_stage_options('snp_recalibrate_gatk', 'cores')
        gatk_args = "-T VariantRecalibrator --disable_auto_index_creation_and_locking_when_reading_rods " \
                    "-R {reference} --minNumBadVariants 5000 --num_threads {cores} " \
                    "-resource:hapmap,known=false,training=true,truth=true,prior=15.0 {hapmap} " \
                    "-resource:omni,known=false,training=true,truth=true,prior=12.0 {one_k_g_snps} " \
                    "-resource:1000G,known=false,training=true,truth=false,prior=10.0 {one_k_g_highconf_snps} " \
                    "-an DP -an QD -an MQ -an MQRankSum -an ReadPosRankSum -an FS -an SOR " \
                    "-input {genotype_vcf} --recal_file {recal_snp} --tranches_file {tranches_snp} " \
                    "-rscriptFile {snp_plots} -mode SNP".format(reference=self.reference,
                                                                cores=cores, hapmap=self.hapmap, one_k_g_snps=self.one_k_g_snps,
                                                                one_k_g_highconf_snps=self.one_k_g_highconf_snps, genotype_vcf=genotype_vcf_in,
                                                                recal_snp=recal_snp_out, tranches_snp=tranches_snp_out, snp_plots=snp_plots_r_out)
        self.run_gatk('snp_recalibrate_gatk', gatk_args)

    def indel_recalibrate_gatk(self, genotype_vcf_in, outputs):
        '''INDEL recalibration using GATK'''
        recal_indel_out, tranches_indel_out, indel_plots_r_out = outputs
        cores = self.get_stage_options('indel_recalibrate_gatk', 'cores')
        gatk_args = "-T VariantRecalibrator --disable_auto_index_creation_and_locking_when_reading_rods " \
                    "-R {reference} --minNumBadVariants 5000 --num_threads {cores} " \
                    "-resource:mills,known=false,training=true,truth=true,prior=12.0 {mills_hg19} " \
                    "-resource:1000G,known=false,training=true,truth=true,prior=10.0 {one_k_g_indels} " \
                    "-an DP -an QD -an MQ -an MQRankSum -an ReadPosRankSum -an FS -an SOR " \
                    "-input {genotype_vcf} -recalFile {recal_indel} " \
                    "-tranchesFile {tranches_indel} -rscriptFile {indel_plots} " \
                    " -mode INDEL --maxGaussians 4".format(reference=self.reference,
                                                           cores=cores, mills_hg19=self.mills_hg19, one_k_g_indels=self.one_k_g_indels,
                                                           genotype_vcf=genotype_vcf_in, recal_indel=recal_indel_out,
                                                           tranches_indel=tranches_indel_out, indel_plots=indel_plots_r_out)
        self.run_gatk('indel_recalibrate_gatk', gatk_args)

    def apply_snp_recalibrate_gatk(self, inputs, vcf_out):
        '''Apply SNP recalibration using GATK'''
        genotype_vcf_in, [recal_snp, tranches_snp] = inputs
        cores = self.get_stage_options('apply_snp_recalibrate_gatk', 'cores')
        gatk_args = "-T ApplyRecalibration --disable_auto_index_creation_and_locking_when_reading_rods " \
                    "-R {reference} --ts_filter_level 99.5 --excludeFiltered --num_threads {cores} " \
                    "-input {genotype_vcf} -recalFile {recal_snp} -tranchesFile {tranches_snp} " \
                    "-mode SNP -o {vcf_out}".format(reference=self.reference,
                                                    cores=cores, genotype_vcf=genotype_vcf_in, recal_snp=recal_snp,
                                                    tranches_snp=tranches_snp, vcf_out=vcf_out)
        self.run_gatk('apply_snp_recalibrate_gatk', gatk_args)

    def apply_indel_recalibrate_gatk(self, inputs, vcf_out):
        '''Apply INDEL recalibration using GATK'''
        genotype_vcf_in, [recal_indel, tranches_indel] = inputs
        cores = self.get_stage_options('apply_indel_recalibrate_gatk', 'cores')
        gatk_args = "-T ApplyRecalibration --disable_auto_index_creation_and_locking_when_reading_rods " \
                    "-R {reference} --ts_filter_level 99.0 --excludeFiltered --num_threads {cores} " \
                    "-input {genotype_vcf} -recalFile {recal_indel} -tranchesFile {tranches_indel} " \
                    "-mode INDEL -o {vcf_out}".format(reference=self.reference,
                                                      cores=cores, genotype_vcf=genotype_vcf_in, recal_indel=recal_indel,
                                                      tranches_indel=tranches_indel, vcf_out=vcf_out)
        self.run_gatk('apply_indel_recalibrate_gatk', gatk_args)

    def combine_variants_gatk(self, inputs, vcf_out):
        '''Combine variants using GATK'''
        recal_snp, [recal_indel] = inputs
        cores = self.get_stage_options('combine_variants_gatk', 'cores')
        gatk_args = "-T CombineVariants -R {reference} --disable_auto_index_creation_and_locking_when_reading_rods " \
                    "--num_threads {cores} --genotypemergeoption UNSORTED --variant {recal_snp} " \
                    "--variant {recal_indel} -o {vcf_out}".format(reference=self.reference,
                                                                  cores=cores, recal_snp=recal_snp, recal_indel=recal_indel,
                                                                  vcf_out=vcf_out)
        self.run_gatk('combine_variants_gatk', gatk_args)

    def select_variants_gatk(self, combined_vcf, vcf_out):
        '''Select variants using GATK'''
        gatk_args = "-T SelectVariants -R {reference} --disable_auto_index_creation_and_locking_when_reading_rods " \
                    "--variant {combined_vcf} -select 'DP > 100' -o {vcf_out}".format(reference=self.reference,
                                                                                      combined_vcf=combined_vcf, vcf_out=vcf_out)
        self.run_gatk('select_variants_gatk', gatk_args)
|
10,144 | ce43bb3c2172dd0f286807a751827a4c3ab85816 | # We run a preorder depth first search on the root of a binary tree.
#
# At each node in this traversal, we output D dashes (where D is the depth of t
# his node), then we output the value of this node. (If the depth of a node is D,
# the depth of its immediate child is D+1. The depth of the root node is 0.)
#
# If a node has only one child, that child is guaranteed to be the left child.
#
#
# Given the output S of this traversal, recover the tree and return its root.
#
#
#
# Example 1:
#
#
#
#
# Input: "1-2--3--4-5--6--7"
# Output: [1,2,5,3,4,6,7]
#
#
#
# Example 2:
#
#
#
#
# Input: "1-2--3---4-5--6---7"
# Output: [1,2,5,3,null,6,null,4,null,7]
#
#
#
#
#
#
# Example 3:
#
#
#
#
# Input: "1-401--349---90--88"
# Output: [1,401,null,349,88,90]
#
#
#
#
#
# Note:
#
#
# The number of nodes in the original tree is between 1 and 1000.
# Each node will have a value between 1 and 10^9.
#
#
# Related Topics: Tree, Depth-First Search
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from foo import TreeNode
class Solution:
    def recoverFromPreorder(self, S: str) -> TreeNode:
        """Rebuild a binary tree from its dashed preorder serialization.

        Each entry in S is D dashes (the node's depth) followed by the node
        value; an only child is always a left child.  Returns the root.

        BUG FIX: the original took only the first character of each value
        (TreeNode(S[0]), left_str[0]), corrupting multi-digit values such as
        "401"; values are now parsed in full and stored as ints (the problem
        guarantees values in [1, 10^9]).
        """
        if not S:
            return None
        stack = []  # stack[d] is the most recently created node at depth d
        i, n = 0, len(S)
        while i < n:
            # Count leading dashes to get this node's depth.
            depth = 0
            while i < n and S[i] == '-':
                depth += 1
                i += 1
            # Consume the full (possibly multi-digit) value.
            j = i
            while j < n and S[j] != '-':
                j += 1
            node = TreeNode(int(S[i:j]))
            i = j
            # Pop back to this node's parent (depth - 1), then attach:
            # left slot first, right slot if left is taken.
            del stack[depth:]
            if stack:
                if stack[-1].left is None:
                    stack[-1].left = node
                else:
                    stack[-1].right = node
            stack.append(node)
        return stack[0]
# a = Solution().recoverFromPreorder("1-2--3--4-5--6--7")
# a = Solution().recoverFromPreorder("1-2--3---4-5--6---7")
# Ad-hoc smoke test: runs at import time and builds the Example 3 tree.
a = Solution().recoverFromPreorder("1-401--349---90--88")
# leetcode submit region end(Prohibit modification and deletion)
|
10,145 | b07556b6800e79679cc3d40276a098aa90b0bff4 | import unittest
from deep_tasks import *
class TestFindFuncs(unittest.TestCase):
    """Tests for deep_find (depth-first) and broad_find (breadth-first)."""
    def setUp(self):
        # Fixture: key 'F' appears in both branches so the duplicate-key tests
        # below can distinguish depth-first from breadth-first search order.
        self.diki = {
            'A': {
                'C': [2, 5],
                'D': {
                    'I': 'heyo!',
                    'J': 6,
                    'F': 'In [A][D]'
                },
                'E': False
            },
            'B': {
                'F': 'In [B]',
                'G': None,
                'H': True
            }
        }
    def test_deep_find_with_target_in_top_level(self):
        result = deep_find(self.diki, 'B')
        expected = {'F': 'In [B]', 'G': None, 'H': True}
        self.assertEqual(result, expected)
    def test_deep_find_with_target_inside_first_branch(self):
        result = deep_find(self.diki, 'E')
        expected = False
        self.assertEqual(result, expected)
    def test_deep_find_with_target_outside_first_branch(self):
        result = deep_find(self.diki, 'J')
        expected = 6
        self.assertEqual(result, expected)
    def test_deep_find_with_duplicate_key(self):
        # Depth-first must return the nested [A][D] hit, not the [B] one.
        result = deep_find(self.diki, 'F')
        expected = 'In [A][D]'
        self.assertEqual(result, expected)
    def test_broad_find_with_target_in_top_level(self):
        result = broad_find(self.diki, 'B')
        expected = {'F': 'In [B]', 'G': None, 'H': True}
        self.assertEqual(result, expected)
    def test_broad_find_finds_correct_duplicate_key(self):
        # Breadth-first must return the shallower [B] hit.
        result = broad_find(self.diki, 'F')
        expected = 'In [B]'
        self.assertEqual(result, expected)
class TestFindAllFuncs(unittest.TestCase):
    """Tests for the generator variants deep_f_all / broad_f_all."""
    def setUp(self):
        # 'C' (and 'F') occur more than once so traversal order is observable.
        self.dic = {
            'A': {
                'C': [2, 5],
                'D': {
                    'I': 'heyo!',
                    'C': 6,
                    'F': 'In [A][D]'
                },
                'E': False
            },
            'B': {
                'F': 'In [B]',
                'C': None,
                'H': True
            }
        }
    def test_if_deep_f_all_is_depth_first(self):
        # Depth-first: the nested [A][D] hit must come before the [B] one.
        hits = list(deep_f_all(self.dic, 'F'))
        self.assertEqual(hits, ['In [A][D]', 'In [B]'])
    def test_deep_f_all_with_key_not_in_data(self):
        # A key that never occurs yields nothing at all.
        hits = list(deep_f_all(self.dic, 'Z'))
        self.assertEqual(hits, [])
    def test_if_broad_f_all_is_breadth_first(self):
        # Breadth-first: both top-level 'C' hits precede the nested one.
        hits = list(broad_f_all(self.dic, 'C'))
        self.assertEqual(hits, [[2, 5], None, 6])
class TestDeepUpdate(unittest.TestCase):
    """Tests for deep_update, which replaces values for every matching key."""
    def setUp(self):
        # Fixture: 'C' occurs three times at different depths.
        self.dic = {
            'A': {
                'C': '[2, 5]',
                'D': {
                    'I': 'heyo!',
                    'C': '6',
                    'F': 'In [A][D]'
                },
                'E': 'False'
            },
            'B': {
                'F': 'In [B]',
                'C': 'None',
                'H': 'True'
            }
        }
    def test_deep_update_changing_nested_dict_to_string(self):
        # Replacing a key whose value is itself a dict drops that subtree.
        result = deep_update(self.dic, 'D', True)
        expected = {
            'A': {
                'C': '[2, 5]',
                'D': True,
                'E': 'False'
            },
            'B': {
                'F': 'In [B]',
                'C': 'None',
                'H': 'True'
            }
        }
        self.assertEqual(result, expected)
    def test_deep_update_changes_values_for_all_matching_keys(self):
        # Every occurrence of 'C', at every depth, must be updated.
        result = deep_update(self.dic, 'C', True)
        expected = {
            'A': {
                'C': True,
                'D': {
                    'I': 'heyo!',
                    'C': True,
                    'F': 'In [A][D]'
                },
                'E': 'False'
            },
            'B': {
                'F': 'In [B]',
                'C': True,
                'H': 'True'
            }
        }
        self.assertEqual(result, expected)
class TestDeepApply(unittest.TestCase):
    """Tests for deep_apply, which maps a function over every leaf value."""
    def setUp(self):
        self.dic = {
            'A': {
                'list': [2, 5],
                'C': {
                    'str': 'a',
                    'int': 6,
                },
                'bool': False
            },
            'B': None,
        }
    def test_deep_apply_with_simple_times_two_function(self):
        # times_two doubles via *2: lists/strings repeat, ints double,
        # False*2 == 0; None is expected to pass through unchanged.
        result = deep_apply(times_two, self.dic)
        expected = {
            'A': {
                'list': [2, 5, 2, 5],
                'C': {
                    'str': 'aa',
                    'int': 12,
                },
                'bool': 0
            },
            'B': None,
        }
        self.assertEqual(result, expected)
class TestSchemaValidator(unittest.TestCase):
    """Tests for schema_validator against a nested list-based schema."""
    def setUp(self):
        # Schema: flat keys, plus 'key3' whose value must contain the
        # listed inner keys.
        self.schema = [
            'key1',
            'key2', [
                'key3', [
                    'inner_key1',
                    'inner_key2'
                ]
            ]
        ]
        self.valid_dic = {
            'key1': 'val1',
            'key2': 'val2',
            'key3': {
                'inner_key1': 'val1',
                'inner_key2': 'val2'
            }
        }
        # Identical to valid_dic except for the extra, undeclared 'key4'.
        self.inval_dic = {
            'key1': 'val1',
            'key2': 'val2',
            'key3': {
                'inner_key1': 'val1',
                'inner_key2': 'val2'
            },
            'key4': 'not expected'
        }
    def test_schema_validator_validates_valid_dic(self):
        result = schema_validator(self.schema, self.valid_dic)
        expected = True
        self.assertEqual(result, expected)
    def test_schema_validator_doesnt_validate_inval_dic(self):
        result = schema_validator(self.schema, self.inval_dic)
        expected = False
        self.assertEqual(result, expected)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
10,146 | d4814dc755ff0f3a8e82427659a7e4dfeaa84bcd | import math
import operator
from scipy import stats
from utils import getTweets
w1 = 0.3
w2 = 0.3
w3 = 0.3
def kullback_leibler(distr_a, distr_b):
    """Return the KL divergence D(a || b) between two distributions."""
    divergence = stats.entropy(distr_a, distr_b)
    return divergence
def logistic_increasing_func(x):
    """Standard (increasing) logistic sigmoid: 1 / (1 + e**-x).

    BUG FIX: the original returned 1 / (1 + exp(x)), which is a *decreasing*
    function of x, contradicting the function's name and inverting the
    score combination in overall_score; the exponent sign is flipped.
    """
    return 1 / (1 + math.exp(-x))
def coverage_score(tweet):
    # TODO: placeholder — always 0 until a real coverage metric is implemented.
    return 0
def significance_score(tweet):
    # TODO: placeholder — always 0 until a real significance metric is implemented.
    return 0
def diversity_score(tweet):
    # TODO: placeholder — always 0 until a real diversity metric is implemented.
    return 0
def overall_score(tweet):
    """Weighted combination of the three sub-scores (module weights w1/w2/w3)."""
    coverage_term = 1 - logistic_increasing_func(coverage_score(tweet))
    significance_term = logistic_increasing_func(significance_score(tweet))
    diversity_term = logistic_increasing_func(diversity_score(tweet))
    return w1 * coverage_term + w2 * significance_term + w3 * diversity_term
tweets = getTweets('baltimore')
selected = {}
scores= {}
# Score every tweet by its text.
for id,tweet in tweets.items():
    score = overall_score(tweet.text)
    scores[id] = score
# sorted() is ascending, so this keeps the *lowest*-scoring tweets.
# NOTE(review): [0:49] keeps 49 items — if "top 50" was intended, use [0:50].
sorted_scores = sorted(scores.items(), key=operator.itemgetter(1))
sorted_scores = dict(sorted_scores[0:49])
10,147 | e70493981b140fc0b59b99b30f0d67c37ba967ea | from RSAAlgorithm import autoGen as genKeyPair
class Keyring:
    """A toy PGP-style keyring: a list of one-entry {name: publicKey} dicts."""

    def __init__(self):
        self.name = "Me"
        # 10 bits of entropy is ridiculously weak: for testing purposes only.
        self.publicKey, self.privateKey = genKeyPair(10)
        self.keys = [{self.name: self.publicKey}]

    def addKeyValuePair(self, keyValuePair):
        """Append one {name: publicKey} entry to the ring."""
        self.keys.append(keyValuePair)

    def deleteKeyPairByKey(self, publicKey):
        """Drop every entry whose public key equals *publicKey*."""
        self.keys = [x for x in self.keys if list(x.values())[0] != publicKey]

    def deleteKeyPairByName(self, name):
        """Drop every entry whose owner name equals *name*."""
        self.keys = [x for x in self.keys if list(x.keys())[0] != name]

    def importKeyRingFromTrusted(self, trustedKey, keyRing):
        """Merge *keyRing*'s entries into ours if *trustedKey* is already trusted.

        BUG FIX: the original called ``self.keys.append(keyRing)``, which
        nested the whole imported list as a single element; ``extend`` adds
        the individual entries instead.
        """
        if any(x == trustedKey for x in self.keys):
            new_entries = [x for x in keyRing if x not in self.keys]
            self.keys.extend(new_entries)
|
10,148 | 24b99b72d627ec6231516524c44767c60a4a5b5f | from functools import reduce
with open('input') as rawinput:
groups = rawinput.read().split('\n\n')
def count_individual_answers(individual):
    """Count each answered question (character) for one person.

    Returns a dict mapping question letter -> occurrence count.  Replaces the
    hand-rolled reduce() accumulator with collections.Counter, which performs
    exactly this frequency count in C.
    """
    from collections import Counter
    return dict(Counter(individual))
def count_group_answers(group):
    """Count per-question totals for one group and return how many distinct
    questions anyone in the group answered.

    *group* is a newline-separated block of text, one person per line.
    """
    def reducer(accumulator, individual):
        # Fold one person's per-question counts into the group accumulator.
        counted = count_individual_answers(individual)
        def update(_accumulator, question):
            _accumulator[question] = _accumulator.get(question, 0) + counted[question]
            return _accumulator
        return reduce(update, counted.keys(), accumulator)
    group_answers = reduce(reducer, [g for g in group.split('\n') if g.strip() != ''], {})
    print("Group %s: %s" % (group, group_answers))  # debug output
    return len(group_answers.keys())
# Sum the per-group distinct-answer counts over all groups (puzzle answer).
total_count = reduce(lambda x,y: x+y, map(count_group_answers, groups), 0)
print("Total count: %s" % total_count)
|
10,149 | edcbc5c642684eb83cf45a131a74c91de4404a58 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.http import HtmlResponse
from bookparser.items import BookparserItem
class Book24Spider(scrapy.Spider):
    """Spider that searches book24.ru for a title and scrapes every result page."""
    name = 'book24'
    allowed_domains = ['book24.ru']

    def __init__(self, book, **kwargs):
        # FIX: call the scrapy.Spider initializer (the original skipped it,
        # leaving base-class spider state unconfigured).  **kwargs keeps the
        # signature backward-compatible with positional use.
        super().__init__(**kwargs)
        self.start_urls = [f'https://book24.ru/search/?q={book}']

    def parse(self, response: HtmlResponse):
        """Yield a request per book on the page, then follow the next page."""
        next_page = response.xpath("//div[@class='catalog-pagination__list']/a[@class='catalog-pagination__item _text js-pagination-catalog-item']/@href").extract_first()
        book_links = response.xpath("//div[@class='catalog-products__item js-catalog-products-item']//a[@class='book__image-link js-item-element ddl_product_link']/@href").extract()
        for link in book_links:
            yield response.follow(link, callback=self.book_parse)
        # BUG FIX: on the last results page extract_first() returns None and
        # response.follow(None) raises; only follow when a next page exists.
        if next_page:
            yield response.follow(next_page, callback=self.parse)

    def book_parse(self, response: HtmlResponse):
        """Scrape one book detail page into a BookparserItem."""
        name = response.xpath("//div[@class='item-detail__informations-box']/h1[@class='item-detail__title']/text()").extract_first()
        link = response.url
        authors = response.xpath("//a[contains(@data-link,'author')]/text()").extract_first()
        publisher = response.xpath(
            "//div[@class='item-tab']//a[contains(@href,'brand')]/text()").extract_first()
        price = response.xpath(
            "//div[@class='item-actions__buttons-box']//div[@class='item-actions__price']/b/text()").extract_first()
        currency = response.xpath(
            "//div[@class='item-actions__buttons-box']//div[@class='item-actions__price']/text()").extract_first()
        rate = response.xpath("//div[@class='item-detail__information-item']//span[@class='rating__rate-value']/text()").extract_first()
        product_id = response.xpath("//a[@class='button _block _fill _d _item js-toggle js-product-card-button js-add2basket']/@data-product").extract_first()
        yield BookparserItem(name=name, link=link, authors=authors, publisher=publisher, price=price, currency=currency, rate=rate,
                             product_id=product_id)
|
10,150 | ada2dff08d5221ed885ab97eabbd8d2972cf3396 | class Error(Exception):
pass
def foo():
for i in reversed(range(10)):
if i == 5:
raise Error
yield i
# NOTE(review): Python 2 syntax (print statements) — will not run under Python 3.
i = 0
try:
    # After the generator raises, `i` holds the enumerate index of the last
    # successfully yielded item.
    for (i, val) in enumerate(foo()):
        print i
except Error:
    print "caught exception at i = %d" % i
    raise
|
10,151 | 297816b3f675d550d8abbe2ab4c2cee40c52b880 | #!/usr/bin/python3
#
# jip, 20.12.2017, falling into marasmus... :-)
#
import sys
import tkinter
from tkinter import Button
from tkinter import TOP, BOTTOM, LEFT
import time
from random import randint
#from Line import Line
from Point import Point
from Snowflake import Snowflake
def quit():
    # Tear down the Tk window, then hard-exit the process.
    root.destroy()
    sys.exit(0)
def get_color(r, g, b):
    """Return a Tk colour string '#rrggbb' for 0-255 component values."""
    return '#%02x%02x%02x' % (r, g, b)
# ------------------------------------
# Build the main window: a black 1200x800 canvas plus a quit button.
root = tkinter.Tk()
root.title("... marasmus was getting stronger ...")
Button(root, text="ะพะน ััั...", command=quit).pack( side = BOTTOM )
w=1200
h=800
canvas = tkinter.Canvas(root, width=w, height=h, background='black')
canvas.pack()
def start():
    """Animation loop: spawn rows of snowflakes and scroll them downward.

    NOTE(review): contains an infinite ``while True`` — the function never
    returns, so the trailing ``root.after(0, start)`` is unreachable.
    """
    h = int( canvas['height'] )
    w = int( canvas['width'] )
    number_w = 14.0
    between_w = w/number_w
    r = 255
    g = 255
    b = 255
    x = 0
    y = 0
    sfss = []
    distance = 75
    LIM = 11
    while True:
        # Spawn a fresh row of flakes every 75 ticks (distance starts at 75,
        # so the first row appears immediately).
        if distance%75 == 0:
        #if distance <= 45:
            sfs = []
            while x < w + 10:
                # Randomize each flake's spin, size, and jittered position.
                direction = randint(-1, 1)
                start_angle = randint(-10, 10)
                l = randint(6, 9)
                x_delta = randint(-30, 30)
                y_delta = randint(-15, 15)
                s = Snowflake( Point( x+x_delta, y_delta ), get_color( r, g, b ), l, start_angle, direction )
                s.draw( canvas )
                sfs.append( s )
                x += between_w
            sfss.insert( 0, sfs )
#            sfss.append( sfs )
            x = 0
        distance += 1
        #l += l
        canvas.update()
        time.sleep(0.0001)
        # Move every live flake down by one pixel.
        for sf in sfss:
            for s in sf:
                s.mv( canvas, 0, 1 )
#        canvas.update()
#        print( "len(sfss): ", len(sfss) )
        # The cap on live rows shrinks over time; excess rows are dropped.
        if( distance%1500) == 0: LIM -= 1
        if len(sfss) >= LIM: del sfss[ len(sfss)-1 ]
    root.after(0, start)
root.mainloop()
|
10,152 | 2619d6f9cab0cbe231a6a7d3bb56e7bc6d489089 | # inpld project - python script
# - AIM: poll the value of a sensor and send as OSC to sclang e.g. '/gsr 52'
import OSC
from OSC import OSCClient, OSCMessage
from threading import Timer
import Adafruit_BBIO.ADC as ADC
import random # for faking random ADC values
inPin = "P9_40" # connect sensor to this pin
sendAddress = '127.0.0.1', 57120 # address to send to SuperCollider
sensingPollRate = 0.05 # rate at which values will be read (0.05 = 20ms)
def init_sensing_loop():
    # Arm a one-shot timer; sense_and_send_values re-arms it, forming the loop.
    Timer(sensingPollRate, sense_and_send_values).start()
def sense_and_send_values():
    """Read a (currently faked) sensor value and send it as OSC '/gsr'."""
    # sensedValue = ADC.read(inPin)
    sensedValue = random.random() * 400 # faking it for now in the range 0 to 400
    msg = OSC.OSCMessage() # do we need the OSC. here when OSCMessage has been declared explicitingly above?
    msg.setAddress('/gsr')
    msg.append(sensedValue)
    print "sending locally to supercollider: '{0}', '{1}'".format(msg, client)
    try:
        client.send ( msg )
    except:
        # NOTE(review): bare except swallows *every* error, not just
        # "server unavailable" — consider catching the specific OSC error.
        print "waiting for supercollider to become available"
        pass
    init_sensing_loop() # recursive call, keeps timer going
# main:
ADC.setup()
client = OSCClient()
client.connect( sendAddress )
init_sensing_loop() # init call to start the sensing loop
try:
    while True:
        # FIXME: `time` is never imported in this file — this raises NameError.
        time.sleep(1)
except KeyboardInterrupt:
    print "\nClosing OSCServer."
    # FIXME: `pythonServer` and `st` are not defined anywhere in this file.
    pythonServer.close()
    print "Waiting for Server-thread to finish"
    st.join() ##!!!
    print "Done"
|
10,153 | 9afe405dfceec2abf6c1dea58fc92c447c33ca51 | # coding = utf-8
# python 3.7.3
# created by wuyang on 2020/3/24
from .base import BaseNet
from .fcn import FCN, FCNHead
from .pspnet import PSPNet
from .deeplabv3 import DeepLabV3
# Registry mapping lower-case model names to their constructor classes.
models = {
    "fcn": FCN,
    "pspnet": PSPNet,
    "deeplabv3": DeepLabV3,
}
def get_segmentation_model(name, **kwargs):
    """Instantiate the segmentation model registered under *name* (case-insensitive)."""
    model_cls = models[name.lower()]
    return model_cls(**kwargs)
|
10,154 | 75a9863bd19775c603021dfb41e9f3f7b158f097 | import caw.widget
class Spacer(caw.widget.Widget):
    """A blank widget that only reserves horizontal space between widgets."""
    def __init__(self, width=5, **kwargs):
        super(Spacer, self).__init__(**kwargs)
        # width_hint: requested layout width in pixels.
        self.width_hint = width
10,155 | 06643d2c93f551e8e046711b1116a076c7446f43 | s="hello python this is python language"
'''expected out put is
hello:1
python:2
this:1
is:1
language:1
'''
x=s.split()
d={}
for i in x:
if d.get(i):
d[i]=d.get(i)+1
else:
d[i]=1
print d
|
10,156 | 2cf66dbf96281e08a034ff4a43da177679efa423 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-09 21:18
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
from django.utils.timezone import now
class Migration(migrations.Migration):
    """Add LWDevice.dev_eui (unique 16-char hex) and tighten the hex-string
    validators on dev_addr (8 chars) and nwkskey (32 chars)."""
    dependencies = [
        ('csn', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='lwdevice',
            name='dev_eui',
            field=models.CharField(max_length=16, unique=True, validators=[django.core.validators.RegexValidator('^[0-9a-fA-F]+$', 'Should match the ^[0-9a-fA-F]+$ pattern'), django.core.validators.MinLengthValidator(16)]),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='lwdevice',
            name='dev_addr',
            field=models.CharField(max_length=8, validators=[django.core.validators.RegexValidator('^[0-9a-fA-F]+$', 'Should match the ^[0-9a-fA-F]+$ pattern'), django.core.validators.MinLengthValidator(8)]),
        ),
        migrations.AlterField(
            model_name='lwdevice',
            name='nwkskey',
            field=models.CharField(max_length=32, validators=[django.core.validators.RegexValidator('^[0-9a-fA-F]+$', 'Should match the ^[0-9a-fA-F]+$ pattern'), django.core.validators.MinLengthValidator(32)]),
        ),
    ]
|
10,157 | 8ca1e7fc49881ff859d35111f58754db0cd6881a | """Django templates for hiccup main page."""
|
10,158 | 740cb544c7e62fd40d5cebde9371cd34007dab0e |
def main():
    """Read an integer and print the integer just above its rounded square root."""
    import math
    number = int(input("Escribe un numero: "))
    rounded_root = round(math.sqrt(number))
    print(str(rounded_root + 1))


if __name__=='__main__':
    main()
|
10,159 | 13f2f8d8d0538b4f6cec0ef123eab769fb01f7d8 | <<<<<<< HEAD
import json
import re
import os
hotelname = input("่ฏท่พๅ
ฅ้
ๅบๅ๏ผ")
while not re.match(r'\w+', hotelname):
hotelname = input("่ฏท่พๅ
ฅๆญฃ็กฎ็้
ๅบๅ๏ผ")
json_filename='hotel_info.json'
f=open(json_filename,'r')
hotel_info=json.load(f)
for x in hotel_info:
y=x['hotel_name']
print(x)
print(y)
if y.find(hotelname) == -1:
find_tag = False
continue
else:
find_tag=True
break
if not find_tag:
hotel_flag = input("ๆฏๅฆ่ฆ่พๅ
ฅ้
ๅบไฟกๆฏ๏ผY/N ? ")
while not re.match(r'^[YyNn]$', hotel_flag):
hotel_flag = input("ๆฏๅฆ่ฆ่พๅ
ฅ้
ๅบไฟกๆฏ๏ผY/N ? ")
hotel_tmp = []
if re.match(r'[yY]', hotel_flag):
hotel_address = input("่ฏท่พๅ
ฅ้
ๅบๅฐๅ๏ผ")
hotel_phone = input("่ฏท่พๅ
ฅ้
ๅบ็ต่ฏ๏ผ")
hotel_data = {r'hotel_name': hotelname, r'hotel_address': hotel_address, r"hotel_phone": hotel_phone}
if not os.path.exists(json_filename):
f = open(json_filename, 'w+')
else:
f = open(json_filename,'r')
if f.read() == '':
hotel_tmp.append(hotel_data)
else:
f.close() # ๆไปถไธๅฎ่ฆๅ
ๅ
ณ้ญ๏ผๅ ไธบifๅคๆญ็ๆถๅๆไปถๆ้ๅทฒๆๅฐๆๅ
f = open(json_filename, 'r')
hotel_tmp = json.load(f)
hotel_tmp.append(hotel_data)
f.close()
with open(json_filename, 'w') as f:
json.dump(hotel_tmp, f, ensure_ascii=False)
=======
import win32api
html_filename = 'dish.html'
while True:
win32api.ShellExecute(0, 'print', html_filename, '', '', 1)
win32api.ShellExecute(0, 'open', html_filename, '', '', 1)
>>>>>>> merge dish2.0
|
10,160 | c0425fe2c2d713a8a97164d6127560d9d0154a2a | import os
os.system('clear')
import json
import importlib
import argparse
import gevent
import time
import requests
import smtplib
from smtplib import SMTPException
from gevent.threadpool import ThreadPool
from gevent.queue import Queue
from gevent import monkey
monkey.patch_all(thread=False)
os.environ['TERM'] = 'dumb'
input_file = None
threads_count = 0
global_headers = {}
global_post_param = {}
global_query_param = {}
domain = ''
tasks = Queue()
total_task = 0
success_task = 0
result = ''
def get_input():
    """Parse --input (testcase file) and --threads into module globals.

    NOTE(review): Python 2 print statements throughout this file.
    """
    try:
        global input_file, threads_count
        parser = argparse.ArgumentParser()
        parser.add_argument('--input', help='testcase json file as input')
        parser.add_argument('--threads', help='Thread Pool size (integer)')
        input_file = parser.parse_args().input
        threads_count = int(parser.parse_args().threads)
    except Exception as e:
        print 'Wrong input --help for help : Error(' + str(e) + ')'
        exit(1)
    if input_file is None or threads_count is None:
        print 'Wrong input --help for help'
        exit(1)
def send_mail(host, to):
    """Email the accumulated `result` summary via plain SMTP (no auth/TLS)."""
    sender = 'anurag.shukla@webyog.com'
    message = 'From: Test Suite <anurag.shukla@webyog.com>\nTo: %s\nSubject: Test Suite - %d/%d Passed - %s\n\n%s\n '''
    message = message % (', '.join(to), success_task, total_task, time.strftime('%d %B'), result)
    try:
        smtp = smtplib.SMTP(host)
        smtp.sendmail(sender, to, message)
        print "Successfully sent email"
    except SMTPException:
        print "Error: unable to send email"
def complete_task(test):
    """Run every request in test.TEST and validate each response.

    Returns True (and bumps the global success_task) when all checks pass.
    """
    try:
        for i, request in enumerate(test.TEST['request']):
            # Make relative URLs absolute against the suite's domain.
            if '://' not in request['url']:
                request['url'] = domain + request['url']
            # Merge suite-wide headers / query params / post params in.
            if request.get('headers') is None:
                request['headers'] = {}
            request['headers'].update(test_suite.TEST_ENV['global_headers'])
            if request.get('params') is None:
                request['params'] = {}
            request['params'].update(test_suite.TEST_ENV['global_query_param'])
            if request.get('data') is None:
                request['data'] = {}
            request['data'].update(test_suite.TEST_ENV['global_post_param'])
            if request.get('data_binary') is not None:
                request['data'] = request.get('data_binary')
                del request['data_binary']
            if 'hooks' in test.TEST:
                # FIXME: TEST['request'] is a list — this should almost
                # certainly be test.TEST['request'][i]['hooks'](request).
                test.TEST['request']['hooks'](request)
            print request
            r = requests.Session().request(request['method'], request['url'],
                                           request.get('params'), request.get('data'),
                                           request.get('headers'), request.get('cookies'),
                                           request.get('files'), request.get('auth'),
                                           request.get('timeout'), request.get('allow_redirects', True),
                                           request.get('proxies'), request.get('hooks'),
                                           request.get('stream'), request.get('verify'), request.get('cert'))
            response = test.TEST['response'][i]
            if 'hooks' in response:
                # A response hook overrides all declarative checks.
                if not response['hooks'](r):
                    return False
            else:
                if 'status_code' in response and r.status_code != response['status_code']:
                    return False
                if 'body' in response and r.content != response['body']:
                    return False
                # FIXME: tests for key 'header' but iterates response['headers'],
                # and compares r.headers[h] against response[h] instead of
                # response['headers'][h] — header checks look broken.
                if 'header' in response:
                    for h in response['headers']:
                        if r.headers.get(h, None) == None:
                            return False
                        elif r.headers[h] != response[h]:
                            return False
    except Exception as e:
        print e
        return False
    global success_task
    success_task += 1
    return True
def worker():
    """Drain the shared task queue: import each testcase module and run it."""
    while not tasks.empty():
        task = tasks.get()
        # Import the testcase by its module name (filename without '.py').
        test = importlib.import_module(task[:task.find('.py')])
        print '========================================================================'
        print 'Starting Test:', test.TEST['name']
        global result
        if complete_task(test) is False:
            result += test.TEST['name'] + ': Failed\n\n'
            print test.TEST['name'], 'Test Failed'
            continue
        print test.TEST['name'], 'Test Success'
        result += test.TEST['name'] + ': Passed\n\n'
if __name__ == '__main__':
    get_input()
    # Import the suite module by the file's basename (must be importable).
    test_suite = importlib.import_module(input_file[input_file.rfind('/')+1:input_file.find('.py')])
    print '========================================================================'
    print 'Test Suite Project Name:', test_suite.TEST_ENV['project_name']
    if 'init_hooks' in test_suite.TEST_ENV:
        if not test_suite.TEST_ENV['init_hooks'](global_headers, global_post_param, global_query_param):
            print 'Init Failed'
    domain = '%s://%s' % (test_suite.TEST_ENV['protocol'], test_suite.TEST_ENV['domain'])
    # NOTE(review): `global` at module level is a no-op; total_task is already global.
    global total_task
    for i in range(0, len(test_suite.TEST_ENV['testcases'])):
        tasks.put_nowait(test_suite.TEST_ENV['testcases'][i])
        total_task += 1
    # Fan the queue out over a fixed-size worker pool.
    pool = ThreadPool(threads_count)
    for _ in range(threads_count):
        pool.spawn(worker)
    pool.join()
    if 'smtp_setting' in test_suite.TEST_ENV.keys() and len(test_suite.TEST_ENV['smtp_setting']) > 0:
        send_mail(test_suite.TEST_ENV['smtp_setting']['host'], test_suite.TEST_ENV['smtp_setting']['to'])
|
10,161 | 0e013ca31c3b969c8dac3638ec56d4d12492aa8e | import tensorflow as tf
from absl.testing import parameterized
from meta_model import utils
def assert_specs_equal(actual, expected):
    """Assert two tf.TypeSpec objects have the same shape, dtype and concrete class."""
    assert tuple(actual._shape) == tuple(expected._shape)
    assert actual._dtype == expected._dtype
    # Idiom fix: identity (`is`) is the conventional comparison for class
    # objects; `==` on types falls back to identity anyway.
    assert type(actual) is type(expected)
    if isinstance(actual, tf.RaggedTensorSpec):
        assert actual._ragged_rank == expected._ragged_rank
class UtilsTest(tf.test.TestCase, parameterized.TestCase):
    """Parameterized coverage of utils.placeholder / utils.type_spec for
    dense, ragged, and sparse TensorSpecs."""
    @parameterized.parameters(
        ((None, 3), tf.int32),
        ((2,), tf.float64),
        ((2, None), tf.float32),
    )
    def test_tensor_placeholder(self, shape, dtype):
        spec = tf.TensorSpec(shape=shape, dtype=dtype)
        placeholder = utils.placeholder(spec)
        assert tuple(placeholder.shape) == shape
        assert placeholder.dtype == dtype
    @parameterized.parameters(
        ((2, None), tf.int32, 1),
        ((None, None), tf.int64, 1),
        ((None, None, 2), tf.float64, 1),
        ((None, None, None), tf.float64, 2),
    )
    def test_ragged_placeholder(self, shape, dtype, ragged_rank):
        expected = tf.RaggedTensorSpec(shape, ragged_rank=ragged_rank, dtype=dtype)
        placeholder = utils.placeholder(expected)
        # Round-trip: the placeholder's recovered spec must match the input.
        assert_specs_equal(utils.type_spec(placeholder), expected)
        assert tuple(placeholder.shape) == shape
        assert placeholder.dtype == dtype
    @parameterized.parameters(
        ((None, None), tf.int32),
        ((None, 3), tf.int32),
        ((2,), tf.float64),
        ((2, None), tf.float32),
    )
    def test_sparse_placeholder(self, shape, dtype):
        spec = tf.SparseTensorSpec(shape, dtype)
        placeholder = utils.placeholder(spec)
        assert tuple(placeholder.shape) == shape
        assert placeholder.dtype == dtype
    # NOTE(review): disabled test for utils.batched_spec kept for reference.
    # @parameterized.parameters(
    #     (tf.TensorSpec((2,), tf.float64), 3, None, tf.TensorSpec, (3, 2)),
    #     (tf.TensorSpec((None,), tf.float64), 3, False, tf.TensorSpec, (3, None)),
    #     (tf.TensorSpec((None,), tf.float64), None, False, tf.TensorSpec, (None, None)),
    #     (
    #         tf.TensorSpec((None,), tf.float64),
    #         None,
    #         True,
    #         tf.RaggedTensorSpec,
    #         (None, None),
    #     ),
    #     (
    #         tf.RaggedTensorSpec((None, None), tf.float32),
    #         None,
    #         None,
    #         tf.RaggedTensorSpec,
    #         (None, None, None),
    #     ),
    #     (
    #         tf.SparseTensorSpec((2, 3), tf.float64),
    #         None,
    #         None,
    #         tf.SparseTensorSpec,
    #         (None, 2, 3),
    #     ),
    # )
    # def test_batched_spec(self, spec, batch_size, ragged, expected_cls, expected_shape):
    #     actual = utils.batched_spec(spec, batch_size=batch_size, ragged=ragged)
    #     assert tuple(actual._shape) == expected_shape
    #     assert isinstance(actual, expected_cls)
    #     assert actual._dtype == spec._dtype
    @parameterized.parameters(
        (
            tf.keras.Input((3,), batch_size=2, dtype=tf.float64),
            tf.TensorSpec((2, 3), tf.float64),
        ),
        (
            tf.keras.Input(shape=(None,), batch_size=3, ragged=True, dtype=tf.float64),
            tf.RaggedTensorSpec((3, None), tf.float64),
        ),
        (
            tf.keras.Input(shape=(4,), batch_size=3, sparse=True, dtype=tf.float64),
            tf.SparseTensorSpec((3, 4), tf.float64),
        ),
    )
    def test_type_spec(self, x, expected):
        assert_specs_equal(utils.type_spec(x), expected)
# Run with the TensorFlow test runner when executed directly.
if __name__ == "__main__":
    tf.test.main()
|
10,162 | 5ad63a3149707961bb9ac76e162e4159330da6d1 | """
ํฑ๋๋ฐํด
https://www.acmicpc.net/problem/14891
"""
def moveClock(g):
    """Rotate gear *g* clockwise: every tooth shifts one position right (in place)."""
    gears[g][:] = gears[g][-1:] + gears[g][:-1]
def moveNonClock(g):
    """Rotate gear *g* counter-clockwise: every tooth shifts one position left (in place)."""
    gears[g][:] = gears[g][1:] + gears[g][:1]
def moveGear(gear, direction, before) :
    """Rotate *gear* and propagate to meshing neighbours.

    before: 0 = initial user call, 1 = called by the gear to the right,
    2 = called by the gear to the left (prevents bouncing back to the caller).
    Neighbours are visited *before* this gear rotates, so every engagement
    check sees the pre-rotation tooth states.
    """
    # Gear to the right: teeth differ (sum of 0/1 values == 1) -> counter-rotate it.
    if before != 1 and gear+1 < 4 and (gears[gear][2] + gears[gear+1][6]) == 1 :
        moveGear(gear+1,direction*(-1),2)
    # Gear to the left: same engagement rule on the opposite contact teeth.
    if before != 2 and gear-1 >= 0 and (gears[gear][6] + gears[gear-1][2]) == 1 :
        moveGear(gear-1,direction*(-1),1)
    # Finally rotate the current gear (1 = clockwise, -1 = counter-clockwise).
    if direction == 1 :
        moveClock(gear)
    else :
        moveNonClock(gear)
# Four gears, each 8 teeth given as a digit string; " ".join splits it into 0/1 ints.
gears = [list(map(int, " ".join(input()).split())) for _ in range(4)]
K = int(input())
for _ in range(K) :
    gear, direction = map(int, input().split())
    moveGear(gear-1, direction, 0)
# Score: the 12-o'clock tooth of gears 1..4 weighted 1, 2, 4, 8 (Baekjoon 14891).
print(gears[0][0]*1+gears[1][0]*2+gears[2][0]*4+gears[3][0]*8)
"""
10101111
01111101
11001110
00000010
2
3 -1
1 1
""" |
10,163 | c918ca69685b0aff413be3d2e83a2e5221eebf10 | import urllib.request, urllib.error, urllib.parse, json
from flask import Flask, render_template, request, request
import logging
import random
app = Flask(__name__)
def safe_get(url):
    """urlopen *url*, returning the response object or None on HTTP/URL errors."""
    try:
        return urllib.request.urlopen(url)
    except urllib.error.HTTPError as e:
        print("The server couldn't fulfill the request.")
        print(url)
        print("Error code: ", e.code)
    except urllib.error.URLError as e:
        print("We failed to reach a server")
        print(url)
        print("Reason: ", e.reason)
    # BUG FIX: the original ended with `return Nonemain` — an undefined name
    # that raised NameError on every error path.
    return None
def pretty(obj):
    """Render *obj* as sorted, 2-space-indented JSON."""
    rendered = json.dumps(obj, sort_keys=True, indent=2)
    return rendered
def create(questions, category, difficulty):
    """Fetch a batch of multiple-choice questions from the Open Trivia DB."""
    url = (
        "https://opentdb.com/api.php"
        "?amount={questions}&category={category}&difficulty={difficulty}&type=multiple"
    ).format(questions=questions, category=category, difficulty=difficulty)
    result = safe_get(url)
    if result is not None:
        return json.load(result)
# Mutable module state shared between the two routes:
# answerd[0] holds the current question's correct answer; key is the option list.
answerd = []
key = []
@app.route("/", methods=['GET'])
def main_handler():
    """Home page: with category+difficulty query args, serve one quiz question."""
    app.logger.info("In Main")
    if request.method == 'GET':
        app.logger.info(request.args.get('category'))
        category = request.args.get('category')
        app.logger.info(request.args.get('difficulty'))
        difficulty = request.args.get('difficulty')
        if category and difficulty:
            # Reset per-question state before serving a new question.
            answerd.clear()
            key.clear()
            data = create(1, category, difficulty.lower())
            app.logger.info(data)
            qdict1 = data['results'][0]['question']
            # FIX: the original line was corrupted (an unterminated """ literal,
            # a syntax error).  Reconstructed as unescaping the HTML entity the
            # API uses for double quotes — TODO confirm against the template.
            qdict = qdict1.replace("&quot;", "\"")
            incorrect = data['results'][0]['incorrect_answers']
            correct = data['results'][0]['correct_answer']
            # Shuffle the correct answer in among the three wrong ones.
            for word in incorrect:
                key.append(word)
            key.insert(random.randint(0, 3), correct)
            key1 = str(key)
            key2 = key1.replace("[", '')
            key3 = key2.replace("]", '')
            answerd.insert(0, correct)
            return render_template('question.html', qdict=qdict, key3=key3)
        return render_template("home.html")
    return render_template("home.html")
@app.route("/question", methods=['GET'])
def questionmake():
    # Compare the submitted answer (case-insensitively) with the stored one.
    app.logger.info(request.args.get('answer'))
    answer = request.args.get('answer')
    if answer.lower() == answerd[0].lower():
        return render_template("correct.html")
    return render_template('wrong.html')
# Local development entry point.
if __name__ == "__main__":
    # Used when running locally only.
    # When deploying to Google AppEngine, a webserver process
    # will serve your app.
    app.run(host="localhost", port=8080, debug=True)
|
10,164 | e00d82974c960d0322653a884a68e33197651a45 | import time, math, os
from selenium import webdriver
import pyperclip
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# Launch a specific Chrome binary through chromedriver.
options = webdriver.ChromeOptions()
options.binary_location = "/Applications/Google Chrome 2.app/Contents/MacOS/Google Chrome"
chrome_driver_binary = "/usr/local/bin/chromedriver"
browser = webdriver.Chrome(chrome_driver_binary, options=options)
wait = WebDriverWait(browser, 10)
# with open("test.txt", "w") as file:
# content = file.write("automationbypython")
# def get_twits ():
try:
    # Stepik selenium exercise: wait for the $100 price, book, solve the
    # math challenge, then copy the alert's final token to the clipboard.
    link = "http://suninjuly.github.io/explicit_wait2.html"
    browser.get(link)
    print(wait.until(EC.text_to_be_present_in_element((By.ID, "price"), "$100")))
    button = browser.find_element_by_id("book")
    button.click()
    # confirm = browser.switch_to.alert
    # confirm_text = confirm.text
    # confirm.accept()
    # print(confirm_text)
    browser.execute_script("window.scrollTo(0,100)")
    # new_window = browser.window_handles[1]
    # print(browser.window_handles)
    # browser.switch_to.window(new_window )
    def calc(x):
        # The transform the task requires on the displayed value.
        return str(math.log(abs(12 * math.sin(int(x)))))
    val = browser.find_element_by_id('input_value').text
    print(val)
    ans = calc(val)
    print(calc(val))
    inp = browser.find_element_by_id('answer')
    inp.send_keys(ans)
    button = browser.find_element_by_id("solve")
    button.click()
    alert = browser.switch_to.alert
    answer = alert.text.split()[-1]
    print(answer)
    pyperclip.copy(answer)
    alert.accept()
finally:
    # Leave the page visible briefly, then always shut the browser down.
    time.sleep(10)
    browser.quit()
10,165 | 630c533a9c42d6a55747b8f874aee3daee3e96ef | import binascii
string = "1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736"
nums = binascii.unhexlify(string)  # hex text -> raw bytes
# Try every single-byte XOR key; keep the candidate whose letter frequencies
# best match English (weighted by position in 'etaoinshrdlu').
strings = (''.join(chr(num ^ key) for num in nums) for key in range(256))
test = max(strings, key=lambda s: sum((26-i) * s.count(c) for i, c in enumerate('etaoinshrdlu')))
# BUG FIX: the original printed the undefined name `keyparty` (NameError on
# every run); print the recovered plaintext candidate instead.
print("best candidate", " : ", test)
|
10,166 | 5a170bfc6c894c0547755403b7b3028fa73bb949 | import random
import sqlite3
class DataBase:
    """SQLite-backed storage for cards (file card.s3db).

    FIX / hardening: all queries now use parameterized SQL (? placeholders)
    instead of f-string interpolation.  Besides the injection risk, the old
    unquoted interpolation stored card numbers and PINs as SQL integer
    literals, so a PIN with a leading zero (e.g. "0034") was stored as "34"
    and could never match at login.
    """

    def __init__(self):
        self.ids = 0
        self.conn = sqlite3.connect('card.s3db')
        self.cur = self.conn.cursor()

    def create_table(self) -> None:
        """Create the card table on first run (no-op afterwards)."""
        self.cur.execute('''CREATE TABLE IF NOT EXISTS card (
            id INTEGER,
            number TEXT,
            pin TEXT,
            balance INTEGER DEFAULT 0);''')
        self.conn.commit()

    def update_cards_amount(self) -> int:
        """Refresh and return the number of stored cards (used as the next id)."""
        self.cur.execute("SELECT * FROM card")
        self.ids = len(self.cur.fetchall())
        return self.ids

    def add_data(self, num, pin, balance=0) -> None:
        """Insert a new card row with the next sequential id."""
        self.update_cards_amount()
        self.cur.execute(
            "INSERT INTO card (id, number, pin, balance) VALUES (?, ?, ?, ?);",
            (self.ids, num, pin, balance),
        )
        self.conn.commit()

    def get_card(self, number: str) -> tuple:
        """Return the (id, number, pin, balance) row for *number*, or None."""
        self.cur.execute("SELECT * FROM card WHERE number = ?;", (number,))
        return self.cur.fetchone()

    def change_balance(self, amount: int, card: str) -> None:
        """Add *amount* (may be negative) to the card's balance."""
        self.cur.execute(
            "UPDATE card SET balance = balance + ? WHERE number = ?;",
            (amount, card),
        )
        self.conn.commit()

    def transfer(self, card_from: str, card_to: str) -> None:
        """Interactively move money between two cards (prompts for the amount)."""
        if card_from == card_to:
            print("You can't transfer money to the same account!\n")
            return
        card_to_query = self.get_card(card_to)
        # Luhn-validate the destination number before looking it up.
        if card_to != card_to[:-1] + CreditCard.luhn_checker(card_to[:-1]):
            print("Probably you made a mistake in the card number. Please try again!\n")
        elif not card_to_query:
            print("Such a card does not exist.\n")
        else:
            amount = int(input("Enter how much money you want to transfer:"))
            while amount <= 0:
                print("Incorrect amount. Try again")
                amount = int(input("Enter how much money you want to transfer:"))
            if self.get_card(card_from)[-1] < amount:
                print("Not enough money!\n")
            else:
                # Debit then credit, reusing the single balance-update path.
                self.change_balance(-amount, card_from)
                self.change_balance(amount, card_to)
                print("Success!\n")

    def delete_account(self, number) -> None:
        """Remove the card row for *number*."""
        self.cur.execute("DELETE FROM card WHERE number = ?;", (number,))
        self.conn.commit()

    def __del__(self):
        self.conn.close()
class CreditCard:
    """A randomly generated 16-digit card (400000 BIN) with a 4-digit PIN."""

    def __init__(self):
        # 15-digit payload in the 400000xxxxxxxxx range, plus the Luhn check digit.
        self.num = f"{random.randint(400000000000000, 400000999999999):15}"
        self.num += self.luhn_checker(number=self.num)
        self.pin = f"{random.randint(0, 9999):04}"
        self.balance = 0

    @staticmethod
    def luhn_checker(number: str) -> str:
        """Return the Luhn check digit for *number* as a one-character string.

        Digits at even 0-based positions are doubled (minus 9 when > 9);
        the check digit brings the grand total to a multiple of ten.
        """
        total = 0
        for idx, ch in enumerate(number):
            digit = int(ch)
            if idx % 2 == 0:
                digit *= 2
                if digit > 9:
                    digit -= 9
            total += digit
        return str((-total) % 10)

    def __repr__(self):
        return self.num
def choose_action() -> str:
    """Show the main menu and return the user's raw (string) choice."""
    menu = ("1. Create an account\n"
            "2. Log into account\n"
            "0. Exit\n")
    choice = input(menu)
    print()
    return choice
def create_account():
    # Generate a fresh card + PIN and persist it with a zero balance.
    credit_card = CreditCard()
    database.add_data(credit_card.num, credit_card.pin)
    print("Your card has been created\n"
          "Your card number:\n"
          f"{credit_card.num}\n"
          "Your card PIN:\n"
          f"{credit_card.pin}\n")
def log_into_account():
    """Authenticate by card number + PIN, then run the account menu loop.

    Menu choices: 1 balance, 2 add income, 3 transfer, 4 close account,
    5 log out, 0 exit the whole program.
    """
    card_number = input("Enter your card number:")
    pin = input("Enter your PIN:")
    query = database.get_card(card_number)
    # Row layout from the card table: query[2] is the PIN, query[3] the balance.
    if not query or query[2] != pin:
        print("Wrong card number or PIN!")
    else:
        print("You have successfully logged in!")
        while True:
            login_choice = int(input("1. Balance\n"
                                     "2. Add income\n"
                                     "3. Do transfer\n"
                                     "4. Close account\n"
                                     "5. Log out\n"
                                     "0. Exit\n"))
            if login_choice == 1:
                print("Balance:", query[3])
            elif login_choice == 2:
                income = int(input("Enter income:"))
                database.change_balance(income, card_number)
                query = database.get_card(card_number)
                print("Income was added!\n")
            elif login_choice == 3:
                print("Transfer")
                card_to = input("Enter card number:")
                database.transfer(card_number, card_to)
                # Refresh the cached row so option 1 shows the post-transfer
                # balance (the old code refreshed only after "Add income",
                # leaving a stale balance here).
                query = database.get_card(card_number)
            elif login_choice == 4:
                database.delete_account(card_number)
                print("The account has been closed!\n")
                break
            elif login_choice == 5:
                print("You have successfully logged out!")
                break
            elif login_choice == 0:
                exit()
# Module entry point: open (or create) the SQLite-backed card store and
# loop over the top-level menu until the user exits.
database = DataBase()
database.create_table()
while True:
    action = choose_action()
    if action == "1":
        create_account()
    elif action == "2":
        log_into_account()
    elif action == "0":
        print("Bye!")
        exit()
|
10,167 | e57eb8cbf84b2f54c47c5fda7f86ce41383bccd9 | from time import sleep
from .automodule import AutoModule
from .siad import Siad, Result
class AutoUnlock(AutoModule):
    """Background module that keeps the siad wallet unlocked.

    Polls the wallet state every ten seconds and re-submits the configured
    password whenever the wallet is found locked.
    """
    def __init__(self, configuration_dictionary: dict, siad: Siad):
        self.password = configuration_dictionary.get("wallet-password", None)
        self.siad = siad
        # Fail fast at construction time on missing configuration.
        if self.password is None:
            raise ValueError(
                "Unlock module misses configuration "
                "parameter 'wallet-password'."
            )
    @property
    def name(self) -> str:
        return "AutoUnlock"
    def print_settings(self):
        self.print("Module enabled")
    def unlock(self):
        # A failure is only logged; the polling loop will try again.
        result = self.siad.unlock_wallet(self.password)
        if result == Result.FAILURE:
            self.print("Failed unlocking the wallet. Retrying...")
        else:
            self.print("Unlocked Wallet")
    def _run(self):
        while True:
            if not self.siad.wallet_unlocked:
                self.unlock()
            sleep(10)
|
10,168 | fc86adc251a10c55c6e20c3e9e355fcfac8427c5 | def chang_first_value(list_to_change):
"""Changes a list inside the function"""
list_to_change[0]='something different'
# Demonstrates pass-by-reference: the mutation made inside the function
# is visible in the caller's list on the second print.
some_nums=[2,6,4,2,22,54,12,8,-1]
print(some_nums)
chang_first_value(some_nums)
print(some_nums)
10,169 | 8c2286d0911f5970eccd29122030f9f285c0cab3 | from django.test import TestCase
from django.test import Client
# Create your tests here.
class NoteTest(TestCase):
    """Smoke tests for the note edit/save endpoints."""
    def test_get_note(self):
        # The edit view should render (HTTP 200) for this note path.
        c = Client()
        response = c.get('/edit/40/6/1/')
        self.assertEqual(response.status_code, 200)
    def test_save_note(self):
        # Posting a comment to the save view should succeed with HTTP 200.
        c = Client()
        response = c.post('/save/40/6/1/', {'note':'das ist mein Kommentar'})
        self.assertEqual(response.status_code, 200)
10,170 | a58062de2c7ce4e6ea7bb2e2156aae1c607fe6c3 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class Error(Model):
    """This object is returned when an error occurs in the Maps API.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar code: Error code.
    :vartype code: str
    :ivar message: If available, a human readable description of the error.
    :vartype message: str
    :ivar target: If available, the component generating the error.
    :vartype target: str
    :ivar details: If available, a list of additional details about the error.
    :vartype details: list[~azure.mgmt.maps.models.ErrorDetailsItem]
    """
    # Every field is server-populated, so msrest treats them all as
    # read-only during validation.
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
        'target': {'readonly': True},
        'details': {'readonly': True},
    }
    # Maps each attribute to its JSON key and msrest type for
    # (de)serialization.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorDetailsItem]'},
    }
    def __init__(self, **kwargs) -> None:
        super(Error, self).__init__(**kwargs)
        # Values are filled in by the deserializer, never by callers.
        self.code = None
        self.message = None
        self.target = None
        self.details = None
class ErrorException(HttpOperationError):
    """Server responded with exception of type: 'Error'.
    :param deserialize: A deserializer
    :param response: Server response to be deserialized.
    """
    def __init__(self, deserialize, response, *args):
        # Delegates to HttpOperationError, naming the model class ('Error')
        # that the response body should be deserialized into.
        super(ErrorException, self).__init__(deserialize, response, 'Error', *args)
|
10,171 | e7d6f6c6090d5dfa8a08b2349182403f61f92e9a | # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class CdbTdsqlExpandInstanceRequest(Request):
    """Request wrapper for the TDSQL ``CdbTdsqlExpandInstance`` API call.

    Each get_*/set_* pair proxies one query parameter through the base
    Request's parameter dict (generated-SDK accessor style).
    """
    def __init__(self):
        # Module, version, action, and API endpoint for this request.
        super(CdbTdsqlExpandInstanceRequest, self).__init__(
            'tdsql', 'qcloudcliV1', 'CdbTdsqlExpandInstance', 'tdsql.api.qcloud.com')
    def get_cdbInstanceUuid(self):
        return self.get_params().get('cdbInstanceUuid')
    def set_cdbInstanceUuid(self, cdbInstanceUuid):
        self.add_param('cdbInstanceUuid', cdbInstanceUuid)
    def get_curDeadline(self):
        return self.get_params().get('curDeadline')
    def set_curDeadline(self, curDeadline):
        self.add_param('curDeadline', curDeadline)
    def get_dbType(self):
        return self.get_params().get('dbType')
    def set_dbType(self, dbType):
        self.add_param('dbType', dbType)
    def get_newDbType(self):
        return self.get_params().get('newDbType')
    def set_newDbType(self, newDbType):
        self.add_param('newDbType', newDbType)
|
10,172 | e2a8293b9d136ed9f85c25562c1d78fd9705f29c | import json
import numpy as np
import pandas as pd
from sklearn import tree
from sklearn.neighbors import KNeighborsClassifier
# In[2]:
# --- load the training set -------------------------------------------------
# Each record carries (cuisine, id, ingredients); json_normalize flattens
# nested keys and we keep only the leaf column names.
with open("./train.json") as f:
    data = json.load(f)
df = pd.io.json.json_normalize(data)
df.columns = df.columns.map(lambda x: x.split(".")[-1])
df = df.values
ingridients = [row[2] for row in df]
cusine = [row[0] for row in df]

# Sorted vocabularies make the one-hot column order (and therefore the
# output file) reproducible across runs -- plain list(set(...)) was not.
iSet = sorted({item for row in ingridients for item in row})
cSet = sorted(set(cusine))
cuisine_index = {name: i for i, name in enumerate(cSet)}


def _one_hot(rows, vocabulary):
    """Binary matrix: one row per recipe, one column per vocabulary term.

    Membership is tested against a set per row (O(1)) instead of the
    original list scan.
    """
    return [
        [1 if term in members else 0 for term in vocabulary]
        for members in (set(row) for row in rows)
    ]


# Training attributes and integer class labels.
tI = _one_hot(ingridients, iSet)
tC = [cuisine_index[name] for name in cusine]

# --- load the test set -----------------------------------------------------
with open("./test.json") as f:
    data = json.load(f)
tf = pd.io.json.json_normalize(data)
tf.columns = tf.columns.map(lambda x: x.split(".")[-1])
tf = tf.values
testId = [row[0] for row in tf]
testIng = [row[1] for row in tf]
tstIng = _one_hot(testIng, iSet)

# --- train and classify with a decision tree -------------------------------
dtcClf = tree.DecisionTreeClassifier()
dtcClf = dtcClf.fit(tI, tC)
dtcPrediction = dtcClf.predict(tstIng)

# Write one "id,cuisine" line per test record.  The old code hard-coded
# 9944 rows (breaking whenever the test file changes size) and never
# closed the file handle.
with open("resultDT.csv", "w") as out:
    out.write("id,cuisine\n")
    for rec_id, label_idx in zip(testId, dtcPrediction):
        out.write("%s,%s\n" % (rec_id, cSet[label_idx]))
|
10,173 | 3e66d928167468e45ae6c6962af56cbc8326a339 | import requests
import time
from config import URL
from config import LOCATION
from bs4 import BeautifulSoup
# Scrape today's and tomorrow's tide table and snapshot it twice
# (tidestore.py immediately, tidestorex.py 150 s later) as importable
# Python constants.  The page lays its data out as a flat run of <td>
# cells: first today's High/Low labels, then today's times, then today's
# heights, followed by the same three groups for tomorrow.  The start
# offset of tomorrow's groups depends on how many tides each day has
# (3 or 4), which is what the branching below detects.
raw_html = requests.get(URL).text
data = BeautifulSoup(raw_html, 'html.parser')

# Page title, minus trailing characters from the "displayDate" span.
# NOTE(review): rstrip() strips a *character set*, not a suffix, and
# encode_contents() returns bytes on Python 3 -- this script appears to
# target Python 2; confirm before porting.
tl = (data.select('h2')[1])
tl = (tl.encode_contents())
tl = tl.rstrip(' <span id="displayDate"></span>')

cells = data.select('td')


def _label(i):
    """First five characters of cell *i* ('High '/'Low' markers and times)."""
    return cells[i].text[0:5]


def _height(i):
    """First four characters of cell *i*, trailing blanks removed."""
    return cells[i].text[0:4].rstrip()


def _is_tide(i):
    """True when cell *i* holds a High/Low label."""
    return _label(i) == "High" or _label(i) == "Low"


def _write_store(path, suffix, hl, labels, times, heights):
    """Write one snapshot file of tide constants.

    *suffix* is '' for tidestore.py and 'x' for tidestorex.py, giving
    variable names hl0.. / hlx0.. etc.  Line layout matches the original
    hand-written writes exactly.
    """
    with open(path, "w") as out:
        out.write("tl = '%s'" % tl)
        out.write("\nhl = '%s'" % hl)
        for i, value in enumerate(labels):
            out.write("\nhl%s%d = '%s'" % (suffix, i, value))
        for i, value in enumerate(times):
            out.write("\ntt%s%d = '%s'" % (suffix, i, value))
        for i, value in enumerate(heights):
            out.write("\nth%s%d = '%s'" % (suffix, i, value))


def _snapshot(hl, labels, times, heights):
    """Write tidestore.py, wait 150 s, then write tidestorex.py."""
    _write_store("%s/tidestore.py" % LOCATION, "", hl, labels, times, heights)
    time.sleep(150)
    _write_store("%s/tidestorex.py" % LOCATION, "x", hl, labels, times, heights)


# --- today's first three tide labels (always present) ----------------------
if _is_tide(0):
    hl0 = _label(0)
    hl = 1
else:
    print ("ERROR 0")
if _is_tide(1):
    hl1 = _label(1)
    hl = 2
else:
    print ("ERROR 1")
if _is_tide(2):
    hl2 = _label(2)
    hl = 3
else:
    print ("ERROR 2")

# --- 3 vs 4 tides today ----------------------------------------------------
if _is_tide(3):
    hl3 = _label(3)
    hl = 4
else:
    # Three tides today: cells 3-5 hold today's times, 6-8 its heights,
    # and tomorrow's label group starts at cell 12.
    if _is_tide(15):
        # 3 tide times today and 4 tide times tomorrow
        labels = [hl0, hl1, hl2, _label(12), _label(13), _label(14)]
        times = [_label(3), _label(4), _label(5),
                 _label(16), _label(17), _label(18)]
        heights = [_height(6), _height(7), _height(8),
                   _height(20), _height(21), _height(22)]
    else:
        # 3 tide times today and 3 tide times tomorrow
        labels = [hl0, hl1, hl2, _label(12), _label(13), _label(14)]
        times = [_label(3), _label(4), _label(5),
                 _label(15), _label(16), _label(17)]
        heights = [_height(6), _height(7), _height(8),
                   _height(18), _height(19), _height(20)]
    _snapshot(hl, labels, times, heights)
    quit()

if _is_tide(4):
    # Five labels in a row is unexpected; like the original script, record
    # the label and stop without writing any snapshot.
    hl4 = _label(4)
    hl = 5
else:
    # Four tides today: cells 4-7 hold today's times, 8-11 its heights,
    # and tomorrow's label group starts at cell 15.
    if _is_tide(18):
        # 4 tide times today and 4 tide times tomorrow
        labels = [hl0, hl1, hl2, hl3, _label(15), _label(16), _label(17)]
        times = [_label(4), _label(5), _label(6), _label(7),
                 _label(19), _label(20), _label(21)]
        heights = [_height(8), _height(9), _height(10), _height(11),
                   _height(23), _height(24), _height(25)]
    else:
        # 4 tide times today and 3 tide times tomorrow
        labels = [hl0, hl1, hl2, hl3, _label(15), _label(16), _label(17)]
        times = [_label(4), _label(5), _label(6), _label(7),
                 _label(18), _label(19), _label(20)]
        heights = [_height(8), _height(9), _height(10), _height(11),
                   _height(21), _height(22), _height(23)]
    _snapshot(hl, labels, times, heights)
|
10,174 | c32ddc2de616c74e1108251f8266f0c5d526b249 | import module_fibonacci
module_fibonacci.fib(7) |
10,175 | d9ff288d468ff914c4b8e1467f191a61562857f0 | from ginn.core import *
from ginn import utils
from ginn import models
|
10,176 | 947f82ace44df37ef71984b76b8ab86d5da43fa0 | import PySimpleGUI as sg
import tkinter as tk
#### 00 get input files from user
#### 00 get input files from user
sg.change_look_and_feel('Topanga') ### my GUI colourful setting

layout1 = [
    [sg.Text('Number of input files: '), sg.Input()],
    [sg.OK(), sg.Cancel()]
]

###### Display first window to ask how many input files
window = sg.Window('Protease Processor', layout1)
event, Number_input_files = window.Read()
Number_questions_to_ask = int(Number_input_files[0])

# Build one file-picker row per requested input file.  The old code
# assembled a *string* of Python source and inserted it into layout2,
# which PySimpleGUI cannot render: a layout must be a list of rows of
# Element objects.  (Its second while loop was also dead code -- the
# counter had already reached the limit.)
file_rows = [
    [sg.Text(f'CSV file {i + 1}: '), sg.Input(), sg.FileBrowse()]
    for i in range(Number_questions_to_ask)
]

layout2 = file_rows + [
    [sg.Text('Select output path'), sg.Input(), sg.FolderBrowse()],
    [sg.OK(), sg.Cancel()]
]
window = sg.Window('Protease Processor', layout2)
event, inp = window.Read()
print(inp)
10,177 | 4c47361a8d08f89954100c486b39b4da3cddfbfc | from misc import *
start = time.time()
def gcdpower(a, b):
    """Reduce the pair (a, b) with a binary-GCD-like iteration.

    Note this is *not* the ordinary gcd -- e.g. it maps (2, 4) to 0 --
    but exactly the quantity the table-building loop below needs.
    """
    while a != b and 0 not in (a, b):
        # Reduce a modulo 2*b when it is at least twice b.
        if a >= 2 * b:
            a %= 2 * b
        # Keep a <= b for the folding step.
        if a > b:
            a, b = b, a
        # Fold b down towards a.
        if 2 * a >= b:
            b = 2 * a - b
        # Restore the invariant a >= b before the next pass.
        if a < b:
            a, b = b, a
    return min(a, b)
# Project-Euler-style computation (Python 2: xrange / print statement).
n = 2000
matrix = [[10,1],[1,0]]
mod = 987898789
total = 0
# gcds[g] counts the pairs (a, b) in [1, n]^2 that gcdpower reduces to g.
gcds = [0]*(n+1)
for a in xrange(1,n+1):
    for b in xrange(1,n+1):
        gcds[gcdpower(a,b)] += 1
for c in xrange(1,n+1):
    matrix = [[10,1],[1,0]]
    #m = m_e(matrix,c,mod)
    # Pairs that reduced to 0 contribute 1 (even c) or 10 (odd c).
    if(c % 2 == 0):
        total += gcds[0]
    else:
        total += gcds[0]*10
    for a in xrange(1,n+1):
        # m_e comes from misc -- presumably modular matrix exponentiation
        # applied repeatedly (so matrix ends up as M^(c^a) mod `mod`);
        # confirm against misc before relying on this.
        matrix = m_e(matrix,c,mod)
        total += (matrix[0][0] * gcds[a])
        total %= mod
print total
print time.time()- start
|
10,178 | b6b2545ebf075eda66b4e453addac6491a20eba0 | import os
# Absolute path of the directory containing this config module.
basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = False
TESTING = False
CSRF_ENABLED = True
# Read from the environment; raises KeyError at import time when
# DATABASE_URL is unset, surfacing misconfiguration early.
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
# NOTE(review): hard-coded secrets committed to source -- rotate these
# and load them from the environment for production use.
SECRET_KEY = 'thisisdfjdshkddfjchanged'
CSRF_SESSION_KEY = 'ad190242b1dd440584ab5324688526dshb'
|
10,179 | ed1c5e019342cd783c2ba7709b9c337018b69242 | from models.gate import Gate
class Light(Gate):
    """Lamp gate: a 3x3 wool base carrying a torch column that drives a
    plus-shaped cluster of redstone lamps."""
    def place_gate(self):
        z = self.coord[0]
        x = self.coord[1]
        # 3x3 gray wool base plate at y=50, placed in the same cell order
        # as before (x varies fastest, then z).
        for dz in range(3):
            for dx in range(3):
                self.editor.set_block(x + dx, 50, z + dz, "gray_wool")
        # Central column: wool, wool, torch, unlit torch.
        self.editor.set_block(x + 1, 51, z + 1, "gray_wool")
        self.editor.set_block(x + 1, 53, z + 1, "gray_wool")
        self.editor.set_block(x + 1, 52, z + 1, "redstone_torch")
        self.editor.set_block(x + 1, 54, z + 1, "redstone_torch", {"lit": "false"})
        # Lamps: one on top, four around the upper torch.
        self.editor.set_block(x + 1, 55, z + 1, "redstone_lamp")
        for dx, dz in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            self.editor.set_block(x + 1 + dx, 54, z + 1 + dz, "redstone_lamp")
        # Input repeaters on either side of the base.
        self.editor.set_block(x + 1, 51, z + 2, "repeater", {"facing": "north", "delay": "1", "powered": "false", "locked": "false"})
        self.editor.set_block(x + 1, 51, z + 0, "repeater", {"facing": "north", "delay": "1", "powered": "false", "locked": "false"})
|
10,180 | f6cf82b43aef951d7e44919d0833e6f36de2cdf9 |
# def calc(*numbers):
# sum=0
# for n in numbers:
# sum = sum + n*n
# return sum
# print(calc(1,2))
# from conf import conf
# # conf={'s':1}
# def sum(a,b):
# s=a+b
# return s
# if __name__=='__main__':
# print(sum(1,2))
# # print(conf['s'])
# def person(name,age,**kw):
# print(name,age,kw)
# def person(name,age):
# print(name,age)
# person('2','2')
# def person(name,age,**kw):
# print('name:', name, 'age:',age,' other:',kw)
# # person('gaoyang',23)
# # person('gaoyang',23,city='shijiazhuang')
# person('gaoyang',23,city='shijiazhuang',job='ceshi')
# def person(name,age,*,city,job):
# print(name,age,city,job)
# person('gaoyang',24,city=โshijiazhuangโ,job='ceshi')
# def person(name,age,*city,job):
# print(name,age,city,job)
# person('gaoyang',24,โshijiazhuangโ,job='ceshi')
# def person(name,age,*,city='shijiazhuang',job):
# print(name,age,city,job)
# person('gaoyang',23,job='ceshi')
# def person(name,age,*,city,job='ceshi'):
# print(name,age,city,job)
# person('gaoyang',23,city='shijiazhuang')
# def person(name,age,*,city='shijiazhuang',job='ceshi'):
# print(name,age,city,job)
# person('gaoyang',23)
# def f1(a,b,c=0,*args,**kw)
# print('a=',a,'b=',b,'c=',c,'args=',aegs,'kw=',kw)
def product(*args):
    """Print the product of all positional arguments."""
    result = 1
    for value in args:
        result *= value
    print(result)
product(2, 4, 3, 5)
def product(*args):
    """Print the product of the arguments, multiplying by index."""
    result = 1
    for position in range(len(args)):
        result = result * args[position]
    print(result)
product(3, 4, 2, 5)
|
10,181 | 5ef6253279c1788d925e7a5dc59c22edb6e18b5f | #Salary Prediction based on number of years of experience
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# import the dataset
dataset = pd.read_csv('Salary-Data.csv')
# Features: every column except the last; target: the salary column (index 1).
x = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 1].values
# split the dataset into training and testing sets (80/20, fixed seed)
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=0)
# fit a simple linear regression model
regressor = LinearRegression()
regressor.fit(x_train, y_train)
# predict salaries for the held-out test set
y_pred = regressor.predict(x_test)
# visualize the training data against the fitted line
plt.scatter(x_train, y_train, color='red')
plt.plot(x_train, regressor.predict(x_train), color='blue')
plt.title('salary vs year of experience')
plt.xlabel('year of experience')
plt.ylabel('salary')
plt.show()
# visualize the test data against the same fitted line
plt.scatter(x_test, y_test, color='red')
plt.plot(x_train, regressor.predict(x_train), color='blue')
plt.title('salary vs year of experience')
plt.xlabel('year of experience')
plt.ylabel('salary')
plt.show()
|
10,182 | e38a779ff0f67f76039dd5050614675f4adaf99d | #!/usr/bin/env python
# coding: utf-8
# # Langchain Quickstart
#
# In this quickstart you will create a simple LLM Chain and learn how to log it and get feedback on an LLM response.
#
# [](https://colab.research.google.com/github/truera/trulens/blob/main/trulens_eval/examples/quickstart.ipynb)
# ## Setup
# ### Add API keys
# For this quickstart you will need Open AI and Huggingface keys
import os
# Placeholder credentials -- replace with real keys (or export them in
# the shell) before running; both services are exercised below.
os.environ["OPENAI_API_KEY"] = "..."
os.environ["HUGGINGFACE_API_KEY"] = "..."
# ### Import from LangChain and TruLens
# Imports main tools:
from trulens_eval import Feedback
from trulens_eval import Huggingface
from trulens_eval import Tru
from trulens_eval import TruChain
tru = Tru()
# Imports from langchain to build app. You may need to install langchain first
# with the following:
# ! pip install langchain>=0.0.170
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts.chat import ChatPromptTemplate
from langchain.prompts.chat import HumanMessagePromptTemplate
from langchain.prompts.chat import PromptTemplate
# ### Create Simple LLM Application
#
# This example uses a LangChain framework and OpenAI LLM
# Single human-message prompt with one {prompt} slot.
full_prompt = HumanMessagePromptTemplate(
    prompt=PromptTemplate(
        template=
        "Provide a helpful response with relevant background information for the following: {prompt}",
        input_variables=["prompt"],
    )
)
chat_prompt_template = ChatPromptTemplate.from_messages([full_prompt])
llm = OpenAI(temperature=0.9, max_tokens=128)
chain = LLMChain(llm=llm, prompt=chat_prompt_template, verbose=True)
# ### Send your first request
# NOTE(review): this literal looks mojibake-encoded; it was presumably
# the Spanish question mark form of 'que hora es?' -- confirm and
# re-save the file as UTF-8.
prompt_input = 'ยฟque hora es?'
llm_response = chain(prompt_input)
print(llm_response)
# ## Initialize Feedback Function(s)
# Initialize Huggingface-based feedback function collection class:
hugs = Huggingface()
# Define a language match feedback function using HuggingFace; by default
# it compares the main app input against the main app output.
f_lang_match = Feedback(hugs.language_match).on_input_output()
# Wrap the chain so every call is logged and evaluated by TruLens.
truchain = TruChain(
    chain,
    app_id='Chain1_ChatApplication',
    feedbacks=[f_lang_match],
    tags="prototype"
)
# The instrumented chain is called exactly like the original:
llm_response = truchain(prompt_input)
print(llm_response)
# Launch the local Streamlit dashboard to explore the logged records.
tru.run_dashboard()  # open a local streamlit app to explore
# tru.stop_dashboard() # stop if needed
# Alternatively, you can run `trulens-eval` from a command line in the same folder to start the dashboard.
# ### Chain Leaderboard
#
# Understand how your LLM application is performing at a glance. Once you've set up logging and evaluation in your application, you can view key performance statistics including cost and average feedback value across all of your LLM apps using the chain leaderboard. As you iterate new versions of your LLM application, you can compare their performance across all of the different quality metrics you've set up.
#
# Note: Average feedback values are returned and printed in a range from 0 (worst) to 1 (best).
#
# 
#
# To dive deeper on a particular chain, click "Select Chain".
#
# ### Understand chain performance with Evaluations
#
# To learn more about the performance of a particular chain or LLM model, we can select it to view its evaluations at the record level. LLM quality is assessed through the use of feedback functions. Feedback functions are extensible methods for determining the quality of LLM responses and can be applied to any downstream LLM task. Out of the box we provide a number of feedback functions for assessing model agreement, sentiment, relevance and more.
#
# The evaluations tab provides record-level metadata and feedback on the quality of your LLM application.
#
# 
#
# ### Deep dive into full chain metadata
#
# Click on a record to dive deep into all of the details of your chain stack and underlying LLM, captured by tru_chain.
#
# 
#
# If you prefer the raw format, you can quickly get it using the "Display full chain json" or "Display full record json" buttons at the bottom of the page.
# Note: Feedback functions evaluated in the deferred manner can be seen in the "Progress" page of the TruLens dashboard.
# ## Or view results directly in your notebook
# Fetch logged records directly (the [0] element is the records frame);
# in script form the value is computed and discarded -- it only displays
# when run as a notebook cell.
tru.get_records_and_feedback(app_ids=[]
                            )[0]  # pass an empty list of app_ids to get all
|
10,183 | f4c518e62f0d6797ccef75ce11f2c68082bfd603 | import os
import sys
from flask import Flask
# This is just here for the sake of examples testing
# to make sure that the imports work
# (you don't actually need it in your code)
sys.path.insert(1, ".")
from flask_discord_interactions import (DiscordInteractions, # noqa: E402
Response, ActionRow, Button,
ButtonStyles, Embed, SelectMenu,
SelectMenuOption)
app = Flask(__name__)
discord = DiscordInteractions(app)
# Credentials come from the environment; a missing variable raises
# KeyError at startup rather than failing later at request time.
app.config["DISCORD_CLIENT_ID"] = os.environ["DISCORD_CLIENT_ID"]
app.config["DISCORD_PUBLIC_KEY"] = os.environ["DISCORD_PUBLIC_KEY"]
app.config["DISCORD_CLIENT_SECRET"] = os.environ["DISCORD_CLIENT_SECRET"]
discord.update_slash_commands()
# In reality, you'd store these values in a database
# For simplicity, we store them as globals in this example
# Generally, this is a bad idea
# https://stackoverflow.com/questions/32815451/
click_count = 0
# The handler edits the original response by setting update=True
# It sets the action for the button with custom_id
@discord.custom_handler()
def handle_click(ctx):
    """Increment the shared counter and edit the original message in place."""
    global click_count
    click_count += 1
    button = Button(
        style=ButtonStyles.PRIMARY,
        custom_id=handle_click,
        label="Click Me!"
    )
    # update=True edits the existing message instead of posting a new one.
    return Response(
        content=f"The button has been clicked {click_count} times",
        components=[ActionRow(components=[button])],
        update=True
    )
# The main command sends the initial Response
@discord.command()
def click_counter(ctx):
    "Count the number of button clicks"
    button = Button(
        style=ButtonStyles.PRIMARY,
        custom_id=handle_click,
        label="Click Me!"
    )
    # Initial message; subsequent clicks are handled by handle_click.
    return Response(
        content=f"The button has been clicked {click_count} times",
        components=[ActionRow(components=[button])]
    )
# You can also return a normal message
@discord.custom_handler()
def handle_upvote(ctx):
    # A plain string is sent as an ordinary follow-up message.
    return "Upvote by " + ctx.author.display_name + "!"
@discord.custom_handler()
def handle_downvote(ctx):
    # A plain string is sent as an ordinary follow-up message.
    return "Downvote by " + ctx.author.display_name + "!"
@discord.command()
def voting(ctx, question: str):
    "Vote on something!"
    # Two buttons wired to the up/downvote handlers above.
    # NOTE(review): the emoji name literals below look mojibake-encoded
    # (likely originally the up/down arrow emoji) -- confirm against the
    # original source and re-save as UTF-8 before shipping.
    return Response(
        content=f"The question is: {question}",
        components=[
            ActionRow(components=[
                Button(
                    style=ButtonStyles.SUCCESS,
                    custom_id=handle_upvote,
                    emoji={
                        "name": "โฌ๏ธ"
                    }
                ),
                Button(
                    style=ButtonStyles.DANGER,
                    custom_id=handle_downvote,
                    emoji={
                        "name": "โฌ๏ธ",
                    }
                )
            ])
        ]
    )
# Ephemeral messages and embeds work
@discord.custom_handler()
def handle_avatar_view(ctx):
    # Ephemeral: only the clicking user sees this embed.
    user_embed = Embed(
        title=f"{ctx.author.display_name}",
        description=f"{ctx.author.username}#{ctx.author.discriminator}"
    )
    return Response(embed=user_embed, ephemeral=True)
@discord.command()
def username(ctx):
    "Show your username and discriminator"
    view_button = Button(
        style=ButtonStyles.PRIMARY,
        custom_id=handle_avatar_view,
        label="View User!"
    )
    row = ActionRow(components=[view_button])
    return Response(content="Show user info!", components=[row])
# Return nothing for no action
@discord.custom_handler()
def handle_do_nothing(ctx):
    # No return value: the interaction is acknowledged but nothing changes.
    print("Doing nothing...")
@discord.command()
def do_nothing(ctx):
    # Demonstrates a button whose handler intentionally performs no action.
    return Response(
        content="Do nothing",
        components=[
            ActionRow(components=[
                Button(
                    style=ButtonStyles.PRIMARY,
                    custom_id=handle_do_nothing,
                    label="Nothing at all!"
                )
            ])
        ]
    )
# Link buttons don't need a handler
@discord.command()
def google(ctx):
    # LINK-style buttons carry a url instead of a custom_id.
    return Response(
        content="search engine",
        components=[
            ActionRow(components=[
                Button(
                    style=ButtonStyles.LINK,
                    url="https://www.google.com/",
                    label="Go to google"
                )
            ])
        ]
    )
# Use a list with the Custom ID to include additional state information
# Optionally specify the type (e.g. int) to automatically convert
@discord.custom_handler()
def handle_stateful(ctx, interaction_id, current_count: int):
    # State round-trips through the custom_id list, so each button instance
    # (keyed by interaction_id) keeps its own independent count.
    current_count += 1
    return Response(
        content=(f"This button has been clicked {current_count} times. "
                 "Try calling this command multiple times to see--each button "
                 "count is tracked separately!"),
        components=[
            ActionRow(components=[
                Button(
                    style=ButtonStyles.PRIMARY,
                    custom_id=[handle_stateful, interaction_id, current_count],
                    label="Click Me!"
                )
            ])
        ],
        update=True  # edit the message in place with the incremented count
    )
@discord.command()
def stateful_click_counter(ctx):
    "Count the number of button clicks for this specific button."
    # Seed the per-button state: this interaction's id plus a zero click
    # count are packed into the custom_id and round-trip through
    # handle_stateful on every click.
    return Response(
        content="Click the button!",  # plain literal (was an f-string with no placeholders)
        components=[
            ActionRow(components=[
                Button(
                    style=ButtonStyles.PRIMARY,
                    custom_id=[handle_stateful, ctx.id, 0],
                    label="Click Me!"
                )
            ])
        ]
    )
# Mount the interactions endpoint and push the commands to the test guild.
discord.set_route("/interactions")
discord.update_slash_commands(guild_id=os.environ["TESTING_GUILD"])
if __name__ == '__main__':
    # Disable threading because of global variables
    app.run(threaded=False)
|
# Read the salesperson's name, fixed salary and total sales from stdin, then
# print the total earnings: salary plus a 15% commission on sales.
# (The first line of the damaged source was fused with extraction junk;
# restored here.)
nomeVendedor = input()
salario = float(input())
vendas = float(input())
total = salario + vendas * 0.15
# Two-decimal currency formatting, e.g. "TOTAL = R$ 1234.56".
print("TOTAL = R$ " + "{:.2f}".format(total))
10,185 | fef91fa7ff4f007c889d4a838a67137c06aa7690 | """
This code was implemented by Vojtech Kubac as a part of his Master Thesis that will be defended
on February 2020. At the Faculty of Mathematics and Physics of Charles University in Prague.
"""
"""
This code solves FSI2 and FSI3 Bencmarks from
"S. Turek and J. Hron, โProposal for numerical benchmarking of fluidโ
structure interaction between an elastic object and laminar incompress-
ible flow,โ in Fluid-Structure Interaction - Modelling, Simulation, Opti-
mization, ser. Lecture Notes in Computational Science and Engineering,"
other FSI simulations can be run by loading corresponding mesh and a straightforward
modifications of boundary conditions.
The equations are written in Total-ALE formulation, where for the mesh movement pseudoelasticity
extension of the solid displacement was used.
Chosen Finite Elements are linear discontinuous space for pressure and quadratic continuous space
enriched with quadratic bubble is used for displacement and velocity.
Time discretization scheme is theta-scheme with default value 0.5, which means Crank-Nicolson.
The equation for pressure and incompressibility equation are discretized by implicit Euler.
"""
from dolfin import *
from dolfin import __version__
import mshr
import numpy as np
import csv
import sys
import os.path
from mpi4py.MPI import COMM_WORLD
from optparse import OptionParser
# Define MPI World
# FEniCS 2017.x exposed mpi_comm_world() as a free function; later versions
# provide it as MPI.comm_world.
if __version__[:4] == '2017':
    comm = mpi_comm_world()
else:
    comm = MPI.comm_world
my_rank = comm.Get_rank()
# Use UFLACS to speed-up assembly and limit quadrature degree
parameters["std_out_all_processes"] = False
parameters['form_compiler']['representation'] = 'uflacs'
parameters['form_compiler']['optimize'] = True
parameters['form_compiler']['quadrature_degree'] = 4
parameters['ghost_mode'] = 'shared_facet'
PETScOptions.set('mat_mumps_icntl_24', 1)  # detects null pivots
PETScOptions.set('mat_mumps_cntl_1', 0.01)  # set threshold for partial threshold pivoting, 0.01 is default value
class Problem(NonlinearProblem):
    """
    Nonlinear problem for solving System of nonlinear equations that arises from
    Finite Elemnt discretization of the equtions describing the FSI phenomenon.
    It inherites methods from FEniCS class NonlinearProblem. But redefines methods 'F' and 'J'
    in such a way that it nulls elements corresponding to the artificial mesh-moving equation
    on the interface with elastic solid. This guarantees that the mesh-moving ALE equation for fluid
    does not influence the solution to elasticity displacement.
    """
    def __init__(self, F_mesh, FF, dF_mesh, dF, bcs_mesh, bcs):
        # F_mesh/dF_mesh: residual and Jacobian forms of the artificial
        # mesh-moving (pseudoelasticity) equation; FF/dF: the physical FSI
        # forms; bcs_mesh: BCs that zero the mesh equation on the interface.
        NonlinearProblem.__init__(self)
        self.F_mesh = F_mesh
        self.FF = FF
        self.dF_mesh = dF_mesh
        self.dF = dF
        self.bcs_mesh = bcs_mesh
        self.bcs = bcs
        # Assembler used only to initialise tensors with the right sparsity.
        self.assembler = SystemAssembler(dF+dF_mesh, FF+F_mesh, bcs+bcs_mesh)
        self.A1 = PETScMatrix()
        self.A2 = PETScMatrix()
    def F(self, b, x):
        """Assemble the residual b: mesh-equation part (with its interface rows
        zeroed by bcs_mesh) plus the physical part, then apply the real BCs."""
        self.assembler.init_global_tensor(b, Form(self.FF+self.F_mesh))
        b.apply('add')
        b1=Vector()
        b2=Vector()
        assemble(self.F_mesh, tensor = b1)
        [bc.apply(b1) for bc in self.bcs_mesh]
        assemble(self.FF, tensor = b2)
        b.axpy(1,b1)
        b.axpy(1,b2)
        [bc.apply(b, x) for bc in self.bcs]
    def J(self, A, x):
        """Assemble the Jacobian A the same way: zero the mesh-equation rows on
        the interface (bc.zero) before adding the physical Jacobian."""
        self.assembler.init_global_tensor(A, Form(self.dF+self.dF_mesh))
        A.apply('insert')
        assemble(self.dF_mesh, tensor = self.A1, keep_diagonal=True)
        [bc.zero(self.A1) for bc in self.bcs_mesh]
        assemble(self.dF, tensor = self.A2, keep_diagonal=True)
        A.axpy(1, self.A1, False)
        A.axpy(1, self.A2, False)
        [bc.apply(A) for bc in self.bcs]
class Flow(object):
    """
    Class where the equations for the FSI are defined. It possesses methods 'solve' and 'save'
    that solves equations in each time step and then saves the obtained results.
    """
    def __init__(self, mesh, bndry, interface, dt, theta, v_max, lambda_s, mu_s, rho_s,
            mu_f, rho_f, result, *args, **kwargs):
        """
        Write boundary conditions, equations and create the files for solution.
        """
        self.mesh = mesh
        self.dt = Constant(dt)
        self.theta = theta
        self.t = 0.0
        self.v_max = v_max
        self.mu_f = mu_f
        self.rho_f = rho_f
        self.lambda_s = lambda_s
        self.mu_s = mu_s
        self.rho_s = rho_s
        self.bndry = bndry
        self.interface = interface
        # bounding box tree (used in save() to test point ownership under MPI)
        self.bb = BoundingBoxTree()
        self.bb.build(self.mesh)
        # Define finite elements
        eV = VectorElement("CG", mesh.ufl_cell(), 2)  # velocity element
        eB = VectorElement("Bubble", mesh.ufl_cell(), mesh.geometry().dim()+1)  # Bubble element
        eU = VectorElement("CG", mesh.ufl_cell(), 2)  # displacement element
        eP = FiniteElement("DG", mesh.ufl_cell(), 1)  # pressure element
        eW = MixedElement([eV, eB, eU, eB, eP])  # final mixed element
        W = FunctionSpace(self.mesh, eW)  # mixed space
        self.W = W
        self.V = FunctionSpace(self.mesh, eV)
        # Set boundary conditions
        # Parabolic inflow profile, ramped up smoothly until t = 2.0.
        self.v_in = Expression(("t<2.0? 0.5*(1.0 - cos(0.5*pi*t))*v_max*4/(gW*gW)*(x[1]*(gW - x[1])): \
                v_max*4/(gW*gW)*(x[1]*(gW - x[1]))", "0.0"),
                degree = 2, v_max = Constant(self.v_max), gW = Constant(gW), t = self.t)
        #info("Expression set.")
        bc_v_in = DirichletBC(self.W.sub(0), self.v_in, bndry, _INFLOW)
        bc_v_walls = DirichletBC(self.W.sub(0), Constant((0.0, 0.0)), bndry, _WALLS)
        bc_v_circle = DirichletBC(self.W.sub(0), Constant((0.0, 0.0)), bndry, _CIRCLE)
        bc_u_in = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _INFLOW)
        bc_u_circle = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _CIRCLE)
        bc_u_walls = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _WALLS)
        bc_u_out = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _OUTFLOW)
        self.bcs = [bc_v_in, bc_v_walls, bc_v_circle, bc_u_in, bc_u_walls, bc_u_circle, bc_u_out]
        #info("Mesh BC.")
        # Zero-rows BC for the artificial mesh equation on the FSI interface
        # (consumed by Problem.F/Problem.J, not applied as a physical BC).
        bc_mesh = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), interface, _FSI)
        self.bcs_mesh = [bc_mesh]
        #info("Normal and Circumradius.")
        self.n = FacetNormal(self.mesh)
        self.h = Circumradius(self.mesh)
        I = Identity(self.W.mesh().geometry().dim())
        # Define functions
        self.w = Function(self.W)  # solution to current time step
        self.w0 = Function(self.W)  # solution from previous time step
        (v__, bv_, u__, bu_, p_) = TestFunctions(self.W)
        # sum bubble elements with corresponding Lagrange elements
        v_ = v__ + bv_
        u_ = u__ + bu_
        (v, bv, u, bu, self.p) = split(self.w)
        self.v = v + bv
        self.u = u + bu
        (v0, bv0, u0, bu0, self.p0) = split(self.w0)
        self.v0 = v0 + bv0
        self.u0 = u0 + bu0
        # define deformation gradient, Jacobian
        self.FF = I + grad(self.u)
        self.FF0 = I + grad(self.u0)
        self.JJ = det(self.FF)
        self.JJ0 = det(self.FF0)
        # write ALE mesh movement
        # Pseudoelasticity extension: stiffness scales inversely with cell
        # volume so small cells near the structure deform less.
        self.gamma = 9.0/8.0
        h = CellVolume(self.mesh)**(self.gamma)
        E = Constant(1.0)
        E_mesh = E/h
        nu_mesh = Constant(-0.02)
        mu_mesh = E_mesh/(2*(1.0+nu_mesh))
        lambda_mesh = (nu_mesh*E_mesh)/((1+nu_mesh)*(1-2*nu_mesh))
        F_mesh = inner(mu_mesh*2*sym(grad(self.u)), grad(u_))*dx(0) \
                + lambda_mesh*inner(div(self.u), div(u_))*dx(0)
        # define referential Grad and Div shortcuts
        def Grad(f, F): return dot( grad(f), inv(F) )
        def Div(f, F): return tr( Grad(f, F) )
        # approximate time derivatives
        du = (1.0/self.dt)*(self.u - self.u0)
        dv = (1.0/self.dt)*(self.v - self.v0)
        # compute velocuty part of Cauchy stress tensor for fluid
        self.T_f = -self.p*I + 2*self.mu_f*sym(Grad(self.v, self.FF))
        # NOTE: T_f0 deliberately uses the *current* pressure self.p (not
        # self.p0): the pressure is discretized by implicit Euler (see the
        # module docstring), only the viscous part is theta-weighted.
        self.T_f0 = -self.p*I + 2*self.mu_f*sym(Grad(self.v0, self.FF0))
        # Compute 1st Piola-Kirhhoff tensro for fluid
        # - for computing surface integrals for forces in postprocessing
        self.S_f = self.JJ *self.T_f*inv(self.FF).T
        # write equations for fluid
        a_fluid = inner(self.T_f , Grad(v_, self.FF))*self.JJ*dx(0) \
               - inner(self.p, Div(v_, self.FF))*self.JJ*dx(0) \
               + inner(self.rho_f*Grad(self.v, self.FF )*(self.v - du), v_)*self.JJ*dx(0)
        # The pressure term in a_fluid0 and both b_fluid terms use the current
        # state on purpose: pressure/incompressibility are implicit Euler.
        a_fluid0 = inner(self.T_f0, Grad(v_, self.FF0))*self.JJ0*dx(0) \
               - inner(self.p, Div(v_, self.FF))*self.JJ*dx(0) \
               + inner(self.rho_f*Grad(self.v0, self.FF0)*(self.v0 - du), v_)*self.JJ0*dx(0)
        b_fluid = inner(Div( self.v, self.FF ), p_)*self.JJ*dx(0)
        b_fluid0 = inner(Div( self.v, self.FF ), p_)*self.JJ*dx(0)
        self.F_fluid = (self.theta*self.JJ+(1.0 - self.theta)*self.JJ0)*self.rho_f*inner(dv, v_)*dx(0)\
               + self.theta*(a_fluid + b_fluid) + (1.0 - self.theta)*(a_fluid0 + b_fluid0) \
               + F_mesh
        # compute 1st Piola-Kirchhoff tensor for solid (St. Vennant - Kirchhoff model)
        B_s = self.FF.T *self.FF
        B_s0 = self.FF0.T*self.FF0
        S_s = self.FF *(0.5*self.lambda_s*tr(B_s - I)*I + self.mu_s*(B_s - I))
        S_s0 = self.FF0*(0.5*self.lambda_s*tr(B_s0 - I)*I + self.mu_s*(B_s0 - I))
        # write equation for solid
        # alpha weights the kinematic condition du = theta*v + (1-theta)*v0
        # enforced weakly in the solid.
        alpha = Constant(1.0)  # Constant(1e10) #
        self.F_solid = rho_s*inner(dv, v_)*dx(1) \
               + self.theta*inner(S_s , grad(v_))*dx(1) + (1.0 - self.theta)*inner(S_s0, grad(v_))*dx(1) \
               + alpha*inner(du - (self.theta*self.v + (1.0 - self.theta)*self.v0), u_)*dx(1)
        dF_solid = derivative(self.F_solid, self.w)
        dF_fluid = derivative(self.F_fluid, self.w)
        self.problem = Problem(self.F_fluid, self.F_solid, dF_fluid, dF_solid, self.bcs_mesh, self.bcs)
        self.solver = NewtonSolver()
        # configure solver parameters
        self.solver.parameters['relative_tolerance'] = 1e-6
        self.solver.parameters['maximum_iterations'] = 15
        self.solver.parameters['linear_solver'] = 'mumps'
        # create files for saving
        if my_rank == 0:
            if not os.path.exists(result):
                os.makedirs(result)
        self.vfile = XDMFFile("%s/velocity.xdmf" % result)
        self.ufile = XDMFFile("%s/displacement.xdmf" % result)
        self.pfile = XDMFFile("%s/pressure.xdmf" % result)
        self.sfile = XDMFFile("%s/stress.xdmf" % result)
        self.vfile.parameters["flush_output"] = True
        self.ufile.parameters["flush_output"] = True
        self.pfile.parameters["flush_output"] = True
        self.sfile.parameters["flush_output"] = True
        with open(result+'/data.csv', 'w') as data_file:
            writer = csv.writer(data_file, delimiter=';', lineterminator='\n')
            writer.writerow(['time', 'mean pressure on outflow', 'pressure_jump',
                    'x-coordinate of end of beam', 'y-coordinate of end of beam',
                    'pressure difference',
                    'drag_circle', 'drag_fluid', 'drag_solid', 'drag_fullfluid',
                    'lift_circle', 'lift_fluid', 'lift_solid', 'lift_fullfluid'])
    def solve(self, t, dt):
        """Advance one time step: update the inflow ramp time and run Newton."""
        self.t = t
        self.v_in.t = t
        # NOTE(review): this rebinds self.dt to a *new* Constant; the forms
        # built in __init__ still reference the original Constant. Harmless
        # while dt is fixed, but use self.dt.assign(dt) if dt ever varies.
        self.dt = Constant(dt)
        self.solver.solve(self.problem, self.w.vector())
        self.w0.assign(self.w)
    def save(self, t):
        """Write fields to XDMF and append benchmark quantities (pressures,
        beam-tip displacement, drag/lift) to data.csv (rank 0 only)."""
        (v, b1, u, b2, p) = self.w.split()
        v.rename("v", "velocity")
        u.rename("u", "displacement")
        p.rename("p", "pressure")
        self.vfile.write(v, t)
        self.ufile.write(u, t)
        self.pfile.write(p, t)
        # Mean outflow pressure and pressure jump across the FSI interface.
        P = assemble(self.p*ds(_OUTFLOW))/gW
        PI = assemble(abs(jump(self.p))*dS(_FSI))
        # Compute drag and lift
        # Surface-traction integrals over the fluid part of the cylinder ...
        force = dot(self.S_f, self.n)
        D_C = -assemble(force[0]*dss(_FLUID_CYLINDER))
        L_C = -assemble(force[1]*dss(_FLUID_CYLINDER))
        # ... and variationally-consistent forces via action of the residual
        # on unit test fields supported on the interface.
        w_ = Function(self.W)
        Fbc = DirichletBC(self.W.sub(0), Constant((1.0, 0.0)), self.interface, _FSI)
        Fbc.apply(w_.vector())
        D_F = -assemble(action(self.F_fluid,w_))
        w_ = Function(self.W)
        Fbc = DirichletBC(self.W.sub(0), Constant((0.0, 1.0)), self.interface, _FSI)
        Fbc.apply(w_.vector())
        L_F = -assemble(action(self.F_fluid,w_))
        w_ = Function(self.W)
        Fbc = DirichletBC(self.W.sub(0), Constant((1.0, 0.0)), self.interface, _FSI)
        Fbc.apply(w_.vector())
        D_S = assemble(action(self.F_solid,w_))
        w_ = Function(self.W)
        Fbc = DirichletBC(self.W.sub(0), Constant((0.0, 1.0)), self.interface, _FSI)
        Fbc.apply(w_.vector())
        L_S = assemble(action(self.F_solid,w_))
        w_ = Function(self.W)
        Fbc1 = DirichletBC(self.W.sub(0), Constant((1.0, 0.0)), self.interface, _FLUID_CYLINDER)
        Fbc2 = DirichletBC(self.W.sub(0), Constant((1.0, 0.0)), self.interface, _FSI)
        Fbc1.apply(w_.vector())
        Fbc2.apply(w_.vector())
        D_FF = -assemble(action(self.F_fluid,w_))
        w_ = Function(self.W)
        Fbc1 = DirichletBC(self.W.sub(0), Constant((0.0, 1.0)), self.interface, _FLUID_CYLINDER)
        Fbc2 = DirichletBC(self.W.sub(0), Constant((0.0, 1.0)), self.interface, _FSI)
        Fbc1.apply(w_.vector())
        Fbc2.apply(w_.vector())
        L_FF = -assemble(action(self.F_fluid,w_))
        # MPI trick to extract displacement of the end of the beam
        self.w.set_allow_extrapolation(True)
        pA_loc = self.p((A.x(), A.y()))
        pB_loc = self.p((B.x(), B.y()))
        # NOTE(review): the next line duplicates the previous one — harmless
        # but redundant.
        pB_loc = self.p((B.x(), B.y()))
        Ax_loc = self.u[0]((A.x(), A.y()))
        Ay_loc = self.u[1]((A.x(), A.y()))
        self.w.set_allow_extrapolation(False)
        # Only ranks whose mesh partition contains point A contribute; the
        # MPI sums then average over the owning ranks.
        pi = 0
        if self.bb.compute_first_collision(A) < 4294967295:
            pi = 1
        else:
            pA_loc = 0.0
            Ax_loc = 0.0
            Ay_loc = 0.0
        pA = MPI.sum(comm, pA_loc) / MPI.sum(comm, pi)
        Ax = MPI.sum(comm, Ax_loc) / MPI.sum(comm, pi)
        Ay = MPI.sum(comm, Ay_loc) / MPI.sum(comm, pi)
        pi = 0
        if self.bb.compute_first_collision(B) < 4294967295:
            pi = 1
        else:
            pB_loc = 0.0
        pB = MPI.sum(comm, pB_loc) / MPI.sum(comm, pi)
        p_diff = pB - pA
        # write computed quantities to a csv file
        if my_rank == 0:
            with open(result+'/data.csv', 'a') as data_file:
                writer = csv.writer(data_file, delimiter=';', lineterminator='\n')
                writer.writerow([t, P, PI, Ax, Ay, p_diff, D_C, D_F, D_S, D_FF, L_C, L_F, L_S, L_FF])
def get_benchmark_specification(benchmark = 'FSI1'):
    """
    Method for obtaining the right problem-specific constants.

    Returns (v_max, lambda_s, mu_s, rho_s, mu_f, rho_f, T_end, result) for
    the chosen Turek-Hron benchmark; raises ValueError for an unknown name.
    """
    # Per-benchmark values: (solid density, solid shear modulus,
    # mean inflow velocity, end time). nu_s, rho_f and nu_f are shared.
    specs = {
        'FSI1': (1e03, 5e05, 0.2, 60.0),
        'FSI2': (1e04, 5e05, 1.0, 15.0),
        'FSI3': (1e03, 2e06, 2.0, 20.0),
    }
    if benchmark not in specs:
        raise ValueError('"{}" is a wrong name for problem specification.'.format(benchmark))
    rho_s_val, mu_s_val, U, T_end = specs[benchmark]
    rho_s = Constant(rho_s_val)
    nu_s = Constant(0.4)
    mu_s = Constant(mu_s_val)
    rho_f = Constant(1e03)
    nu_f = Constant(1e-03)
    result = "results-{}/".format(benchmark)
    v_max = Constant(1.5*U)  # mean velocity to maximum velocity (parabolic profile)
    # Derived elastic constants and dynamic viscosity.
    E_s = Constant(2*mu_s*(1+nu_s))
    lambda_s = Constant((nu_s*E_s)/((1+nu_s)*(1-2*nu_s)))
    mu_f = Constant(nu_f*rho_f)
    return v_max, lambda_s, mu_s, rho_s, mu_f, rho_f, T_end, result
# set problem and its discretization
parser = OptionParser()
parser.add_option("--benchmark", dest="benchmark", default='FSI2')
parser.add_option("--mesh", dest="mesh_name", default='mesh_ALE_L1')
parser.add_option("--dt", dest="dt", default='0.001')
parser.add_option("--dt_scheme", dest="dt_scheme", default='CN')  # BE BE_CN
(options, args) = parser.parse_args()
# name of benchmark
benchmark = options.benchmark
# name of mesh
mesh_name = options.mesh_name
relative_path_to_mesh = 'meshes/'+mesh_name+'.h5'
# time step size
# NOTE(review): options.dt stays a string here; Flow wraps it in Constant(dt)
# — confirm Constant accepts a string, otherwise convert with float(dt).
dt = options.dt
# time stepping scheme
dt_scheme = options.dt_scheme
# choose theta according to dt_scheme (1.0 = backward Euler, 0.5 = Crank-Nicolson)
if dt_scheme in ['BE', 'BE_CN']:
    theta = Constant(1.0)
elif dt_scheme == 'CN':
    theta = Constant(0.5)
else:
    raise ValueError('Invalid argument for dt_scheme')
v_max, lambda_s, mu_s, rho_s, mu_f, rho_f, t_end, result = get_benchmark_specification(benchmark)
result = result + 'dt_' + str(dt) + '/' + dt_scheme + '/' + mesh_name[:-3] + '/' + mesh_name[-2:]
# load mesh with boundary and domain markers
sys.path.append('../meshes')
import marker
#(mesh, bndry, domains, interface, A, B) \
#        = marker.give_marked_mesh(mesh_coarseness = mesh_coarseness, refinement = True, ALE = True)
(mesh, bndry, domains, interface, A, B) = marker.give_gmsh_mesh(relative_path_to_mesh)
# domain (used while building mesh) - needed for inflow condition
gW = 0.41
# boundary marks' names (already setted to the mesh) - needed for boundary conditions
_INFLOW = 1
_WALLS = 2
_CIRCLE = 3
_OUTFLOW = 4
# interface marks
_FSI = 1
_FLUID_CYLINDER = 2
# measures carrying the subdomain / boundary / interface markers
dx = dx(domain=mesh, subdomain_data = domains)
ds = ds(domain=mesh, subdomain_data = bndry)
dss = ds(domain=mesh, subdomain_data = interface)
dS = dS(domain=mesh, subdomain_data = interface)
flow = Flow(mesh, bndry, interface, dt, theta, v_max, lambda_s, mu_s, rho_s, mu_f, rho_f, result)
t = 0.0
# Start-up phase: the inflow expression ramps up until t = 2.0.
while t < 2.0:
    if my_rank == 0:
        info("t = %.4f, t_end = %.1f" % (t, t_end))
    flow.solve(t, dt)
    flow.save(t)
    t += float(dt)
# BE_CN: switch from backward Euler to Crank-Nicolson after the ramp.
if dt_scheme == 'BE_CN': flow.theta.assign(0.5)
while t < t_end:
    if my_rank == 0:
        info("t = %.4f, t_end = %.1f" % (t, t_end))
    flow.solve(t, dt)
    flow.save(t)
    t += float(dt)
|
10,186 | 3016046c947b119acdea2272d4aba7c8be79e9dc | # -*- coding:utf-8 -*-
import sys
from . import resnet, inception_v3, dense_net
from .error import ModelError
from lib import storage
# Cache of constructed Recognizer instances, keyed by rec_type
# (populated by get_recognizer when use_cache=True).
_recognizer_cache = {}
# Per-recognition-type model selection. Keys are "<brand>#<part>" strings;
# every entry currently selects the 'resnet' module with model_type 18
# (presumably the ResNet depth — confirm in the resnet module).
# NOTE(review): the original inline comments were mojibake-damaged Chinese
# part names (one was even split across two lines, breaking the dict
# literal); they are re-stated in English from the key names.
_rec_config = {
    'AJ#insole': {  # shoe insole
        'model_type': 'resnet',
        'model_config': {
            'num_epochs': 25,
            'fixed_param': False,
            'model_type': 18,
            'version_in_use': storage.VERSION_LATEST
        }
    },
    'AJ#sole': {  # shoe sole
        'model_type': 'resnet',
        'model_config': {
            'num_epochs': 25,
            'fixed_param': False,
            'model_type': 18,
            'version_in_use': storage.VERSION_LATEST
        }
    },
    'AJ#body': {  # shoe body
        'model_type': 'resnet',
        'model_config': {
            'num_epochs': 25,
            'fixed_param': False,
            'model_type': 18,
            'version_in_use': storage.VERSION_LATEST
        }
    },
    'AJ#inner_body': {  # shoe interior
        'model_type': 'resnet',
        'model_config': {
            'num_epochs': 25,
            'fixed_param': False,
            'model_type': 18,
            'version_in_use': storage.VERSION_LATEST
        }
    },
    'AJ#tongue': {  # shoe tongue
        'model_type': 'resnet',
        'model_config': {
            'num_epochs': 25,
            'fixed_param': False,
            'model_type': 18,
            'version_in_use': storage.VERSION_LATEST
        }
    },
    'AJ#shoe_tag': {  # shoe tag
        'model_type': 'resnet',
        'model_config': {
            'num_epochs': 25,
            'fixed_param': False,
            'model_type': 18,
            'version_in_use': storage.VERSION_LATEST
        }
    },
    'AJ#shoebox': {  # shoebox
        'model_type': 'resnet',
        'model_config': {
            'num_epochs': 25,
            'fixed_param': False,
            'model_type': 18,
            'version_in_use': storage.VERSION_LATEST
        }
    }
}
def get_recognizer(rec_type, model_type=None, model_config=None, use_cache=True):
    """Build (or fetch from cache) the recognizer for ``rec_type``.

    model_type: optional sub-module name (e.g. 'resnet'); when omitted it is
        looked up in _rec_config, and that entry's model_config is merged
        beneath any caller-supplied model_config (caller values win).
    Raises ModelError when no model type can be determined.
    """
    if use_cache and rec_type in _recognizer_cache:
        return _recognizer_cache[rec_type]
    if model_config is None:
        model_config = {}
    if model_type:
        child_module = getattr(sys.modules[__name__], model_type)
    else:
        conf = _rec_config.get(rec_type, {})
        model_type = conf.get('model_type')
        if model_type:
            child_module = getattr(sys.modules[__name__], model_type)
            model_config = {**conf.get('model_config', {}), **model_config}
        else:
            # Message is mojibake for "model_type not specified" — kept
            # verbatim; confirm the original encoding before changing it.
            raise ModelError('model_type ๆชๆๅฎ')
    recognizer = getattr(child_module, 'Recognizer')
    r = recognizer(rec_type, config=model_config)
    if use_cache:
        _recognizer_cache[rec_type] = r
    return r
|
10,187 | 276b62d567fd7e03cbbe2a18554232f62d0aaed8 | from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from django.template.loader import get_template
from django.core.mail import EmailMessage
from django.shortcuts import redirect
from django.contrib import messages
# Create your views here.
def portfolio(request):
    """Render the static portfolio page."""
    return render(request, 'portfolio/portfolio.html')
def partners(request):
    """Render the static partners page."""
    return render(request, 'partners/partners.html')
|
10,188 | 161e1d78da717ba10b76a695f9a37d5058397079 | #๋์ผํ ๋ฌผ๊ฑด์ A๋งค์ฅ์์๋ 20%, B๋งค์ฅ์์๋ 10% ํ ์ธ ํ 11% ํ ์ธ(์ค๋ณตํ ์ธ)ํ๋ค. ๋ฌผ๊ฑด์ด 10,000์์ผ ๋ ์ด๋ ์ผํ๋ชฐ์์ ์ธ๊ฒ ์ด ์ ์์๊น?
# Compare the same item at two shops: shop A applies a single discount,
# shop B applies two stacked discounts; report which shop is cheaper.
# (Korean prompts/messages kept verbatim from the mojibake-damaged source;
# one print string had been split across two physical lines by an encoding
# artifact and is rejoined here to restore valid syntax.)
item_price = int(input("๋ฌผํ์ ๊ฐ๊ฒฉ : "))
# Discount rate and resulting price at market A.
sale_percent_M_A = float(input("A ๋ง์ผ ํ ์ธ์จ : ")) / 100
market_a = item_price * (1 - sale_percent_M_A)
print("A ๋งค์ฅ์์์ ํ ์ธ๋ ๋ฌผํ ๊ฐ๊ฒฉ์", market_a, "์ ์ด๋ค")
# Two stacked discount rates and resulting price at market B.
sale_percent_M_B_1 = float(input("B ๋ง์ผ 1์ฐจ ํ ์ธ์จ : ")) / 100
sale_percent_M_B_2 = float(input("B ๋ง์ผ 2์ฐจ ํ ์ธ์จ : ")) / 100
market_b = item_price * (1 - sale_percent_M_B_1) * (1 - sale_percent_M_B_2)
print("B ๋งค์ฅ์์์ ํ ์ธ๋ ๋ฌผํ ๊ฐ๊ฒฉ์", market_b, "์ ์ด๋ค")
if market_b > market_a:
    print("๋ง์ผ A์์ ๋ฌผํ์ ๊ตฌ์ํ๋ ๊ฒ์ด ๋ ๋ซ๋ค")
elif market_a == market_b:
    print("๋ง์ผ A์ ๋ง์ผ B์์์ ๋ฌผํ ๊ฐ๊ฒฉ์ ๋์ผํ๋ค")
else:
    print("๋ง์ผ B์์์ ๋ฌผํ ๊ฐ๊ฒฉ์ด ๋ ์ธ๋ค")
|
10,189 | 4e5277c1808f61695559a3e0042dff509c9bd569 | MEDIA_ROOT_URL = '/'
# Absolute filesystem path where this Django deployment stores media files.
MEDIA_ROOT = '/var/www/django/Coffee-in-the-Cloud/server/'
|
10,190 | 8f76a676839c3e42b51728756098115abe7bcf56 | # Generated by Django 2.0.3 on 2018-05-15 15:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Order model with foreign keys to
    feedbap.Product and feedbap.User."""

    dependencies = [
        ('feedbap', '0006_product'),
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('orderId', models.IntegerField(primary_key=True, serialize=False)),
                ('orderDate', models.DateField(auto_now=True)),
                ('periodOption', models.CharField(default='F', max_length=1)),
                ('period', models.CharField(max_length=30, null=True)),
                ('quantity', models.FloatField()),
                ('totalPrice', models.FloatField(default=0.0)),
                ('productId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feedbap.Product')),
                ('serialNum', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feedbap.User')),
            ],
        ),
    ]
|
10,191 | f2162b7a1cd9c62ec0c2ab3456716d603ed6ecac | import logging
import sys
import optuna
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
def objective(trial):
    """Optuna objective: error rate of an SGDClassifier on held-out iris data.

    Reports the intermediate error after every partial_fit step so the
    study's pruner can stop unpromising trials early (via TrialPruned).
    """
    iris = datasets.load_iris()
    classes = list(set(iris.target))
    train_x, valid_x, train_y, valid_y = train_test_split(iris.data, iris.target, test_size=0.25, random_state=0)
    alpha = trial.suggest_float("alpha", 1e-5, 1e-1, log=True)  # log-uniform search space
    clf = SGDClassifier(alpha=alpha)
    for step in range(100):
        clf.partial_fit(train_x, train_y, classes=classes)
        # Report intermediate objective value.
        intermediate_value = 1.0 - clf.score(valid_x, valid_y)
        trial.report(intermediate_value, step)
        # Handle pruning based on the intermediate value.
        if trial.should_prune():
            raise optuna.TrialPruned()
    return 1.0 - clf.score(valid_x, valid_y)
# Add stream handler of stdout to show the messages
logger = optuna.logging.get_logger("optuna")
logger.addHandler(logging.StreamHandler(sys.stdout))
# MedianPruner stops trials whose reported intermediate value falls below
# the median of earlier trials at the same step (see Optuna docs).
study = optuna.create_study(pruner=optuna.pruners.MedianPruner())
study.optimize(objective, n_trials=20)
|
10,192 | 4f8fd40fd2cc91265881b0f4ba83ee5db076c1c6 | import uuid
from functools import wraps
from flask import request
from flask import Response
from flask_restful import Resource
def generate_members(element_name, url, total_num):
    """Build a list of OData member references for a Redfish-style collection.

    Args:
        element_name: name segment appended to the collection URL.
        url: base collection URL (expected to end with '/').
        total_num: number of members to generate; member IDs start at 1.

    Returns:
        list[dict]: one {"@odata.id": "<url><element_name><i>"} per member.
    """
    # Comprehension replaces the original manual while-loop and counter.
    return [
        {"@odata.id": "{}{}{}".format(url, element_name, index)}
        for index in range(1, total_num + 1)
    ]
def generate_uuid_by_element_id(element_id):
    """Derive a deterministic name-based (uuid3/MD5, DNS namespace) UUID
    string from ``element_id`` — the same id always yields the same UUID."""
    return str(uuid.uuid3(uuid.NAMESPACE_DNS, element_id))
def check_auth(username, password):
    """check if a username password combination is valid"""
    # Hard-coded demo credentials; compare both fields at once.
    expected = ('admin', 'Passw0rd')
    return (username, password) == expected
def authenticate():
    """Sends a 401 response that enables basic auth"""
    # The WWW-Authenticate header makes browsers prompt for credentials.
    return Response(
        'Could not verify your access level for that URL.\n'
        'You have to login with proper credentials',
        401,
        {
            'WWW-Authenticate': 'Basic realm="Login Required"'
        }
    )
def requires_auth(f):
    """Decorator: reject the request with a 401 challenge unless valid
    Basic-auth credentials are supplied (validated by check_auth)."""
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.authorization
        if not auth or not check_auth(auth.username, auth.password):
            return authenticate()
        return f(*args, **kwargs)
    return decorated
class AuthResource(Resource):
    """Flask-RESTful resource base class: every HTTP method handler is
    wrapped with requires_auth (HTTP Basic authentication)."""
    method_decorators = [requires_auth]
|
10,193 | 6dde96565f9d7ab34a32fbc93b157301280cf4d7 | from ROOT import TH1F, TH2F
import math
class Particle(object):
    """A generator-level particle: PDG code, 3-momentum (GeV) and status."""

    def __init__(self, pdg_code, momentum, status):
        self.pdg = pdg_code
        self.momentum = momentum
        self.status = status
        # Rest mass in GeV: only electrons (|pdg| = 11) and muons (|pdg| = 13)
        # are given a mass here; every other species is treated as massless.
        species = abs(self.pdg)
        if species == 11:
            self.mass = 0.5109989461 / 1000.  ## GeV
        elif species == 13:
            self.mass = 105.6583745 / 1000.  ## GeV
        else:
            self.mass = 0

    def mag_momentum(self):
        """Return |p|, the magnitude of the 3-momentum."""
        squared = sum(component ** 2 for component in self.momentum)
        return math.sqrt(squared)

    def energy(self):
        """Return the relativistic energy sqrt(m^2 + |p|^2)."""
        p_mag = self.mag_momentum()
        return math.sqrt(self.mass ** 2 + p_mag ** 2)
class EventMuMu(object):
    """One mu+ mu- event: collects the muon pair and the incoming/outgoing
    muon neutrino as particles are read from an <event> block."""
    def __init__(self):
        # Slots start as 0 and are overwritten by AddParticle.
        self.muplus = 0
        self.muminus = 0
        self.nuinc = 0
        self.nuout = 0
    def AddParticle(self, particle):
        # pdg 13 = mu-, -13 = mu+, |pdg| 14 = muon neutrino; status 1 marks
        # the outgoing neutrino, anything else is treated as incoming.
        if particle.pdg == 13:
            self.muminus = particle
            print "muminus"
        elif particle.pdg == -13:
            self.muplus = particle
            print "muplus"
        elif abs(particle.pdg) == 14:
            if particle.status == 1:
                self.nuout = particle
                print "nuout"
            else:
                self.nuinc = particle
                print "nuinc"
        return
class EventEE(object):
    """One e+ e- event (pdg +/-11). NOTE: the attributes keep the muplus /
    muminus names from EventMuMu even though they hold electrons."""
    def __init__(self):
        # Slots start as 0 and are overwritten by AddParticle.
        self.muplus = 0
        self.muminus = 0
        self.nuinc = 0
        self.nuout = 0
    def AddParticle(self, particle):
        # pdg 11 = e-, -11 = e+, |pdg| 14 = muon neutrino; status 1 marks
        # the outgoing neutrino, anything else is treated as incoming.
        if particle.pdg == 11:
            self.muminus = particle
            print "muminus"
        elif particle.pdg == -11:
            self.muplus = particle
            print "muplus"
        elif abs(particle.pdg) == 14:
            if particle.status == 1:
                self.nuout = particle
                print "nuout"
            else:
                self.nuinc = particle
                print "nuinc"
        return
if __name__ == "__main__":
    # Parse the LHE-style text file event by event and fill energy histograms.
    #input_file = open("ArSMmumuDec11.dat", "r")
    input_file = open("ArSMeeDec11.dat", "r")
    ## Skip heading
    for line in input_file:
        if "<event>" in line:
            break
    ## Define histograms
    energy_muplus = TH1F("energy_muplus", "", 40, 0., 20.)
    # NOTE(review): the next histogram reuses the ROOT name "energy_muplus";
    # it was probably meant to be "energy_muminus" — ROOT keys histograms by
    # name, so confirm before relying on name-based lookups.
    energy_muminus = TH1F("energy_muplus", "", 40, 0., 20.)
    energy_nuinc = TH1F("energy_nuinc", "", 40, 0., 20.)
    energy_nuout = TH1F("energy_nuout", "", 40, 0., 20.)
    corr_energy_mu = TH2F("h2_energies_mu", "", 20, 0., 10., 20, 0., 10.)
    ## Loop through all the events filling the corresponding histograms
    event = EventEE()
    for line in input_file:
        if "</event>" in line:
            ## End of event: Fill now the histograms.
            energy_muplus.Fill(event.muplus.energy())
            energy_muminus.Fill(event.muminus.energy())
            energy_nuinc.Fill(event.nuinc.energy())
            energy_nuout.Fill(event.nuout.energy())
            corr_energy_mu.Fill(event.muplus.energy(), event.muminus.energy())
            continue
        if "<event>" in line:
            ## New event: Reset
            event = EventEE()
            continue
        print line
        # Particle record: pdg code, status, then the 3-momentum components.
        words = line.split()
        pdg_code = int(words[0])
        status = int(words[1])
        momentum = (float(words[2]), float(words[3]), float(words[4]))  ## GeV
        particle = Particle(pdg_code, momentum, status)
        event.AddParticle(particle)
    # Draw the results, pausing for the user between canvases (Python 2).
    energy_muplus.Draw()
    energy_muminus.Draw("same")
    energy_nuinc.Draw("same")
    energy_nuout.Draw("same")
    raw_input("Press any key to continue...")
    corr_energy_mu.Draw()
    raw_input("Press any key to continue...")
|
10,194 | dfe461bb6043067be580b06aee510a88c74d9b59 | from typing import Dict, Optional
from hidet.ir.expr import Var
from hidet.ir.type import ScalarType
_primitive_variables: Dict[str, Var] = {}
def attach_pool(var):
    """Attach the shared primitive-variable pool to *var*'s __dict__ (if not
    already present) and return *var* unchanged."""
    var.__dict__.setdefault('_primitive_variables', _primitive_variables)
    return var
def thread_idx(dim='x') -> Var:
    """Return the (cached) int32 Var named 'threadIdx.<dim>' for dim in x/y/z."""
    assert dim in ['x', 'y', 'z']
    name = f'threadIdx.{dim}'
    var = _primitive_variables.get(name)
    if var is None:
        var = attach_pool(Var(hint=name, type=ScalarType('int32'), name=name))
        _primitive_variables[name] = var
    return var
def block_idx(dim='x') -> Var:
    """Return the (cached) int32 Var named 'blockIdx.<dim>' for dim in x/y/z."""
    assert dim in ['x', 'y', 'z']
    name = f'blockIdx.{dim}'
    var = _primitive_variables.get(name)
    if var is None:
        var = attach_pool(Var(hint=name, type=ScalarType('int32'), name=name))
        _primitive_variables[name] = var
    return var
def is_primitive_variable(name: str) -> bool:
    """Return True when *name* (e.g. 'threadIdx.x') is a registered primitive variable."""
    return name in _primitive_variables
def get_primitive_variable(name: str) -> Optional[Var]:
    """Return the registered primitive variable named *name*, or None if absent."""
    # Single dict lookup replaces the original membership-test-then-index pair.
    return _primitive_variables.get(name)
|
10,195 | 69b0cc21404fefeb812e2bc636d4076cd15b4cb2 | import time
from django_rq.decorators import job
import django_rq
import rq
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@job
def printsomething(the_thing):
    """RQ job: sleep two seconds, print the argument, then fail on purpose.

    The trailing ``1 / 0`` raises ZeroDivisionError so the job always fails —
    this module demonstrates RQ failure handling (see the commented-out
    exception handler below).
    """
    time.sleep(2)
    print("Look at me I'm printing something!...{}".format(the_thing))
    1 / 0
# def my_handler(job, exc_type, exc_value, traceback):
# job.meta.setdefault('failures', 0)
# job.meta['failures'] += 1
#
# if job.meta['failures'] >= 3:
# logger.warn('job:{} has failed {} times - moving to failed queue'.format(job.id, job.meta['failures']))
# job.save()
# return True
#
# logger.warn('job:{} has failed {} times - retrying'.format(job.id, job.meta['failures']))
#
# django_rq.enqueue(job, timeout=job.timeout)
# return False
#
#
# with rq.Connection():
# q = rq.Queue
# worker = rq.Worker
# worker.push_exc_handler(my_handler)
# worker.work() |
10,196 | d9827d47f018afe2c6d35da2ca35b3b2f4b3de81 | """The class Movie is defined and it contains the details of a movie"""
import webbrowser
class Movie():
    """Holds the details of a single movie.

    Attributes:
        title: movie title.
        storyline: short plot summary.
        poster_image_url: URL of the movie poster image.
        trailer_youtube_url: URL of the movie trailer.
    """

    def __init__(self, movie_title, storyline, poster_link, trailer_link):
        """Store the supplied movie details on the instance."""
        self.title = movie_title
        self.storyline = storyline
        self.poster_image_url = poster_link
        self.trailer_youtube_url = trailer_link

    def show_trailer(self):
        """Open the movie trailer in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
10,197 | 489eb996af35fcfb70558fb3b95f6543551c4300 | # pylint: disable=C0103
# pylint: disable=C0301
# pylint: disable=C0321
'''๊ธฐ๋ณธ๊ตฌ์กฐ
for ๋ณ์ in ๋ฆฌ์คํธ(๋๋ ํํ, ๋ฌธ์์ด):
<์ํํ ๋ฌธ์ฅ1>
<์ํํ ๋ฌธ์ฅ2>
<์ํํ ๋ฌธ์ฅ3>
๋ฆฌ์คํธ๋ ํํ, ๋ฌธ์์ด์ ์ฒซ ๋ฒ์งธ ์์๋ถํฐ ๋ง์ง๋ง ์์๊น์ง ์ฐจ๋ก๋ก ๋ณ์์ ๋์
๋์ด <์ํํ ๋ฌธ์ฅ1> <์ํํ ๋ฌธ์ฅ2> ๋ฑ์ด ์ํ๋๋ค
'''
# For-loop tutorial script. The original Korean comments were mojibake-
# damaged and several comment/string lines were split across physical lines
# by an encoding artifact, breaking the syntax; the splits are rejoined here
# (Korean runtime strings kept verbatim) and the commentary is in English.

# A basic for loop: iterate over the list elements in order.
test_list = ['one', 'two', 'three']
for i in test_list:
    print(i)

# Tuple unpacking in a for loop: each element of `a` is a (first, last) pair.
a = [(1, 2), (3, 4), (5, 6)]
for (first, last) in a:
    print(first + last)

# Application: five students took an exam; a score of 60 or more passes.
marks = [90, 25, 60, 45, 80]  # exam scores of the five students
number = 0  # running student number, incremented each iteration
for mark in marks:
    number = number + 1
    if mark >= 60:
        print("%d๋ฒ ํ์์ ํฉ๊ฒฉ์๋๋ค." % number)
    else:
        print("%d๋ฒ ํ์์ ๋ถํฉ๊ฒฉ์๋๋ค." % number)

# for with continue: when the score is below 60, continue jumps back to the
# top of the loop, skipping the congratulation message.
marks = [90, 25, 60, 45, 80]
number = 0
for mark in marks:
    number = number + 1
    if mark < 60: continue
    print("%d๋ฒ ํ์ ์ถํํฉ๋๋ค. ํฉ๊ฒฉ์๋๋ค." % number)

# range() builds a sequence of numbers; the end value is exclusive.
a = range(10)
print(a)
a = range(1, 11)  # 1 through 10 (11 is excluded)
print(a)

# Sum 1..10 with for + range. (Renamed from `sum`, which shadowed the builtin.)
total = 0
for i in range(1, 11):
    total = total + i
print(total)

# The pass/fail example rewritten with range(len(...)) indexing:
# len(marks) is 5, so number takes the values 0..4.
marks = [90, 25, 60, 45, 80]
for number in range(len(marks)):
    if marks[number] < 60: continue
    print("%d๋ฒ ํ์ ์ถํํฉ๋๋ค. ํฉ๊ฒฉ์๋๋ค." % (number + 1))

# Multiplication table (2 through 9) with nested loops; end=" " keeps each
# row on one line, and the empty print separates the rows.
for i in range(2, 10):
    for j in range(1, 10):
        print(i * j, end=" ")
    print('')

# Building a list inside a for loop: triple every element of `a`.
a = [1, 2, 3, 4]
result = []
for num in a:
    result.append(num*3)
print(result)

# The same, more concisely, with a list comprehension.
a = [1, 2, 3, 4]
result = [num * 3 for num in a]
print(result)

# Comprehension with a condition: triple only the even numbers.
a = [1, 2, 3, 4]
result = [num * 3 for num in a if num % 2 == 0]
print(result)

'''List comprehension syntax:
[expression for item1 in iterable1 if condition1
            for item2 in iterable2 if condition2
            ...
            for itemN in iterableN if conditionN]
'''

# All multiplication-table results collected into a single list.
result = [x*y for x in range(2, 10) for y in range(1, 10)]
print(result)
|
10,198 | b6dc82ad3b278b49dd28ba9b5de8df31ddfd2562 | import sys
import glob
# sys.path.append('generated')
# sys.path.insert(0, glob.glob('../../lib/py/build/lib*')[0])
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TMultiplexedProtocol
from generated.bank.ds.agh import AccountService
from generated.bank.ds.agh import AccountManagement
from generated.bank.ds.agh import PremiumAccountService
from generated.bank.ds.agh.ttypes import *
currencies = ["PLN", "USD", "EUR", "CHF", "GBP"]
def initThrift(host, port):
    """Open a buffered Thrift connection to *host*:*port*.

    Returns a 4-tuple ``(transport, manager, standard, premium)`` — the open
    transport plus one multiplexed client per service, registered under the
    service names "manager", "standard" and "premium".
    """
    transport = TTransport.TBufferedTransport(TSocket.TSocket(host, port))
    protocol = TBinaryProtocol.TBinaryProtocol(transport)

    def _client(service, name):
        # One multiplexed protocol per service so all three share the socket.
        return service.Client(TMultiplexedProtocol.TMultiplexedProtocol(protocol, name))

    manager = _client(AccountManagement, "manager")
    standard = _client(AccountService, "standard")
    premium = _client(PremiumAccountService, "premium")
    transport.open()
    return transport, manager, standard, premium
if __name__ == '__main__':
    # Simple interactive console client for the bank Thrift services.
    port = int(input("Enter your bank port number"))
    trans, AM, AS, PAS = initThrift("0.0.0.0", port)
    run_flag = True
    while run_flag:  # idiomatic: no parentheses around the condition
        cmd = input("Enter command : create, info, credit, exit")
        if cmd == 'exit':
            run_flag = False
            trans.close()  # release the Thrift transport before quitting
        elif cmd == "create":
            acc_info = input("Enter : \"pesel;firstname;lastname;income;baseCurrency\"")
            p, f, l, i, b = acc_info.split(";")
            i = float(i)  # income arrives as text; the service expects a number
            acc = Account(p, f, l, i, b)
            response = AM.createAccount(acc)
            print(response)
        elif cmd == "info":
            p = input("Enter your pesel")
            response = AS.getAccountDetails(p)
            print(response)
        elif cmd == "credit":
            p = input("Enter your pesel")
            # Typo fix in the prompt: "parrameters" -> "parameters".
            credit_info = input("Enter credit parameters : \"currency;cost;startdate(year-month);enddate\"")
            cur, c, start, stop = credit_info.split(";")
            c = float(c)
            # Dates are entered as "year-month"; split and build ThriftDate objects.
            start = start.split("-")
            stop = stop.split("-")
            start = ThriftDate(int(start[0]), int(start[1]))
            stop = ThriftDate(int(stop[0]), int(stop[1]))
            credit_param = CreditParameters(cur, c, start, stop)
            response = PAS.getCreditCosts(p, credit_param)
            print(response)
        else:
            print("Incorrect command")
|
import datetime
import io
import os
import sys
import uuid
from urllib.parse import unquote_plus
from PIL import Image
from . import storage
client = storage.storage.get_instance()
# Disk-based solution
#def resize_image(image_path, resized_path, w, h):
# with Image.open(image_path) as image:
# image.thumbnail((w,h))
# image.save(resized_path)
# Memory-based solution
def resize_image(image_bytes, w, h):
    """Thumbnail *image_bytes* to fit within (w, h) and return it as JPEG.

    Args:
        image_bytes: raw encoded image data (any format Pillow can open).
        w, h: maximum width/height; thumbnail() preserves aspect ratio.

    Returns:
        io.BytesIO with the JPEG-encoded result, rewound to position 0.
    """
    with Image.open(io.BytesIO(image_bytes)) as image:
        image.thumbnail((w, h))
        # Bug fix: JPEG cannot encode alpha or palette images, so saving an
        # RGBA/LA/P input (e.g. a transparent PNG) raised OSError. Convert
        # those modes to RGB before encoding.
        if image.mode in ("RGBA", "LA", "P"):
            image = image.convert("RGB")
        out = io.BytesIO()
        image.save(out, format='jpeg')
    # necessary to rewind to the beginning of the buffer
    out.seek(0)
    return out
def handler(event):
    """Resize the object named in *event* and return its new key plus timings.

    Expects ``event['bucket']['input'|'output']`` and
    ``event['object']['key'|'width'|'height']``. Times (microseconds) cover
    download, resize and upload separately.
    """
    buckets = event.get('bucket')
    obj = event.get('object')
    input_bucket = buckets.get('input')
    output_bucket = buckets.get('output')
    key = unquote_plus(obj.get('key'))
    width = obj.get('width')
    height = obj.get('height')

    def _us(delta):
        # timedelta -> fractional microseconds
        return delta / datetime.timedelta(microseconds=1)

    t_dl_start = datetime.datetime.now()
    img = client.download_stream(input_bucket, key)
    t_dl_end = datetime.datetime.now()

    t_cpu_start = datetime.datetime.now()
    resized = resize_image(img, width, height)
    resized_size = resized.getbuffer().nbytes
    t_cpu_end = datetime.datetime.now()

    t_ul_start = datetime.datetime.now()
    key_name = client.upload_stream(output_bucket, key, resized)
    t_ul_end = datetime.datetime.now()

    return {
        'result': {
            'bucket': output_bucket,
            'key': key_name
        },
        'measurement': {
            'download_time': _us(t_dl_end - t_dl_start),
            'download_size': len(img),
            'upload_time': _us(t_ul_end - t_ul_start),
            'upload_size': resized_size,
            'compute_time': _us(t_cpu_end - t_cpu_start)
        }
    }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.