text stringlengths 38 1.54M |
|---|
# Hyperparameters for a small conv-net experiment on MNIST-sized inputs.
# NOTE(review): 'H' and kernel_lr_multiplier = 'Glorot' look like BinaryNet-style
# binary-layer settings — confirm against the layer implementation that consumes them.
H = 1.
kernel_lr_multiplier = 'Glorot'
data_format = 'channels_last'
# nn architecture / training settings
batch_size = 32
epochs = 20
channels = 1      # grayscale input
img_rows = 28     # input height (MNIST-sized)
img_cols = 28     # input width
filters = 32
kernel_size = (3, 3)
pool_size = (2, 2)
hidden_units = 128
classes = 10
use_bias = False
# learning rate schedule: exponential decay from lr_start down to lr_end over `epochs`
lr_start = 1e-3
lr_end = 1e-4
lr_decay = (lr_end / lr_start)**(1. / epochs)  # per-epoch multiplicative decay factor
# BN (batch normalization)
epsilon = 1e-6
momentum = 0.9
# dropout rates
p1 = 0.25
p2 = 0.5
|
"""Regenerates the repository README with an Advent of Code progress table.

Reads per-day metadata from data.json (name, part1/part2 leaderboard ranks)
and writes a markdown table to the repository root README.md.
Runs top-to-bottom on import/execution; all I/O is local files.
"""
import json
import datetime
import os

data_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data.json'))
output_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../README.md'))

with open(data_path, 'r') as f:
    datastore = json.load(f)

YEAR = 2020
headers = [
    f"# Advent of Code {YEAR}",
    "",
    "My solutions to this year's problems linked [here](https://adventofcode.com/2020)",
    "",
    "## Progress",
    "",
]

# Single pass over the datastore: build the display rows and track column widths.
# (The original looped twice and contained a no-op `day = day`.)
# Widths are measured on the raw rank strings (including "-1") to keep the
# rendered column widths identical to the previous behavior.
col_widths = [3, 0, 0, 0]  # seeded with len('Day'); other headers are covered by padding below
rows = []
for day, info in datastore.items():
    name = f"[{info['name']}](https://adventofcode.com/{YEAR}/day/{int(day)})"
    rank1, rank2 = str(info['part1']), str(info['part2'])
    col_widths[0] = max(col_widths[0], len(day))
    col_widths[1] = max(col_widths[1], len(name))
    col_widths[2] = max(col_widths[2], len(rank1))
    col_widths[3] = max(col_widths[3], len(rank2))
    # A rank of -1 marks an unfinished day: blank out both rank cells.
    if rank1 == "-1" or rank2 == "-1":
        rank1 = rank2 = " "
    rows.append([day, name, rank1, rank2])

# Extra padding so header cells and alignment markers always fit.
col_widths[1] += 5
col_widths[2] += 4
col_widths[3] += 4

table = [
    ['Day', 'Problem', 'Part One', 'Part Two'],
    # Markdown centered-alignment row, e.g. ':---:' sized to each column.
    [':' + '-' * (w - 2) + ':' for w in col_widths],
]
table.extend(rows)

with open(output_path, 'w') as f:
    for header in headers:
        f.write(header + '\n')
    for row in table:
        # Trailing "" yields a closing ' | ' on each table line.
        line = ' | '.join([row[i].ljust(col_widths[i], ' ') for i in range(4)] + [""])
        f.write(line + '\n')
    f.write(f'\n\nAuto-Generated at {datetime.datetime.now()}')
# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Optional, Type, Union
import numpy as np
import torch
from .intrinsics import CameraIntrinsics, IntrinsicsParamsDefEnum, CameraFOV, \
up_to_homogeneous, down_from_homogeneous, default_dtype
__all__ = [
'PinholeIntrinsics'
]
class PinholeParamsDefEnum(IntrinsicsParamsDefEnum):
    """Defines the order of the pinhole intrinsic parameters within the packed params buffer."""
    x0 = 0       # Principal point offset (x), by default assumes offset from the canvas center
    y0 = 1       # Principal point offset (y), by default assumes offset from the canvas center
    focal_x = 2  # Focal length (x), measured in pixels
    focal_y = 3  # Focal length (y), measured in pixels
    # Following common practice, the axis skew of pinhole cameras is always assumed to be zero
class PinholeIntrinsics(CameraIntrinsics):
r"""Holds the intrinsics parameters of a pinhole camera:
how it should project from camera space to normalized screen / clip space.
The intrinsics parameters are used to define the lens attributes of the perspective projection matrix.
The pinhole camera explicitly exposes the projection transformation matrix.
This may typically be useful for rasterization based rendering pipelines (i.e: OpenGL).
See documentation of CameraIntrinsics for numerous ways of how to use this class.
Kaolin assumes a left handed NDC coordinate system: after applying the projection matrix,
the depth increases inwards into the screen.
The complete perspective matrix can be described by the following factorization:
.. math::
\text{FullProjectionMatrix} = &\text{Ortho} \times \text{Depth Scale} \times \text{Perspective} \\
= &\begin{bmatrix}
2/(r-l) & 0 & 0 & tx \\
0 & 2/(t-b) & 0 & ty \\
0 & 0 & -2/(f-n) & tz \\
0 & 0 & 0 & 1
\end{bmatrix} \\
\times &\begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & B & A \\
0 & 0 & 0 & -1
\end{bmatrix} \\
\times &\begin{bmatrix}
\text{focal_x} & 0 & -x0 & 0 \\
0 & \text{focal_y} & -y0 & 0 \\
0 & 0 & 0 & 1 \\
0 & 0 & 1 & 0
\end{bmatrix} \\
= \begin{bmatrix}
2*\text{focal_x}/(r - l) & 0 & -2x0/(r - l) - tx & 0 \\
0 & 2*\text{focal_y}/(t - b) & -2y0/(t - b) - ty & 0 \\
0 & 0 & V & U \\
0 & 0 & -1 & 0
\end{bmatrix}
where:
- **focal_x**, **focal_y**, **x0**, **y0**: are the intrinsic parameters of the camera
The focal length, together with the image plane width / height,
determines the field of view (fov). This is the effective lens zoom of the scene.
The principal point offsets: x0, y0 allow another DoF to translate the origin of the image plane.
By default, kaolin assumes the NDC origin is at the canvas center (see projection_matrix())
- **n**, **f**: are the near and far clipping planes,
which define the min / max depth of the view frustum.
The near and far planes are also used to normalize the depth values to
normalized device coordinates (see :func:`ndc_matrix()` documentation).
- **r**, **l**, **t**, **b**: are the right, left, top and bottom borders of the
view frustum, and are defined by the perspective
fov (derived from the focal length) and image plane dimensions.
- **tx**, **ty**, **tz**: are defined as:
:math:`tx = -(r + l) / (r - l)`
:math:`ty = -(t + b) / (t - b)`
:math:`tz = -(f + n) / (f - n)`
- **U**, **V**: are elements which define the NDC range, see :func:`ndc_matrix()` for
an elaboration on how these are defined.
- **A**, **B**: can be reverse engineered from U, V and are uniquely defined by them
(and in fact serve a similar function).
This matrix sometimes appears in the literature in a slightly simplified form, for example,
if the principal point offsets x0 = 0, y0 = 0 and the
NDC coords are defined in the range :math:`[-1, 1]`:
.. math::
\begin{bmatrix}
2*\text{focal_x}/(r-l) & 0 & -tx & 0 \\
0 & 2*\text{focal_y}/(t - b) & -ty & 0 \\
0 & 0 & V & U \\
0 & 0 & -1 & 0
\end{bmatrix}
The resulting vector multiplied by this matrix is in homogeneous clip space,
and requires division by the 4th
coordinate (w) to obtain the final NDC coordinates.
Since the choice of NDC space is application dependent,
kaolin maintains the separation of Perspective matrix,
which depends only on the choice of intrinsic parameters from the Depth Scale and Ortho matrices,
(which are squashed together to define the view frustum and NDC range).
.. seealso::
:func:`perspective_matrix()` and :func:`ndc_matrix()` functions.
This class is batched and may hold information from multiple cameras.
Parameters are stored as a single tensor of shape :math:`(\text{num_cameras}, 4)`.
The matrix returned by this class supports differentiable torch operations,
which in turn may update the intrinsic parameters of the camera.
"""
    # Default near / far values are best used for small / medium scale scenes.
    # Use cases with bigger scale should use other explicit values.
    DEFAULT_NEAR = 1e-2
    DEFAULT_FAR = 1e2

    def __init__(self, width: int, height: int, params: torch.Tensor,
                 near: float = DEFAULT_NEAR, far: float = DEFAULT_FAR):
        """Initializes the pinhole intrinsics.

        Args:
            width (int): width of the camera resolution.
            height (int): height of the camera resolution.
            params (torch.Tensor): packed intrinsics of shape :math:`(\\text{num_cameras}, 4)`,
                ordered according to :class:`PinholeParamsDefEnum`.
            near (optional, float): near clipping plane of the view frustum. Default: 1e-2.
            far (optional, float): far clipping plane of the view frustum. Default: 1e2.
        """
        super().__init__(width, height, params, near, far)
    @classmethod
    def param_types(cls) -> Type[IntrinsicsParamsDefEnum]:
        """
        Returns:
            (IntrinsicsParamsDefEnum):
                an enum describing each of the intrinsic parameters managed by the pinhole camera.
                This enum also defines the order in which values are kept within the params buffer.
        """
        return PinholeParamsDefEnum
    @property
    def lens_type(self) -> str:
        """str: identifier of this intrinsics type, always ``'pinhole'``."""
        return 'pinhole'
@classmethod
def from_focal(cls,
width: int, height: int,
focal_x: float, focal_y: Optional[float] = None,
x0: Optional[float] = None, y0: Optional[float] = None,
near: float = DEFAULT_NEAR, far: float = DEFAULT_FAR,
num_cameras: int = 1,
device: Union[torch.device, str] = None,
dtype: torch.dtype = default_dtype) -> PinholeIntrinsics:
"""Constructs a new instance of PinholeIntrinsics from focal length
Args:
width (int): width of the camera resolution
height (int): height of the camera resolution
focal_x (float): focal on x-axis
focal_y (optional, float): focal on y-axis. Default: same that focal_x
x0 (optional, float): horizontal offset from origin of the image plane (by default the center). Default: 0.
y0 (optional, float): vertical offset origin of the image place (by default the center). Default: 0.
near (optional, float):
near clipping plane, define the min depth of the view frustrum
or to normalize the depth values. Default: 1e-2
far (optional, float):
far clipping plane, define the max depth of teh view frustrum
or to normalize the depth values. Default: 1e2
num_cameras (optional, int): the numbers of camera in this object. Default: 1
device (optional, str): the device on which parameters will be allocated. Default: cpu
dtype (optional, str): the dtype on which parameters will be alloacted. Default: torch.float
Returns:
(PinholeInstrinsics): the constructed pinhole camera intrinsics
"""
if x0 is None:
x0 = 0.0
if y0 is None:
y0 = 0.0
focal_y = focal_y if focal_y else focal_x
params = cls._allocate_params(x0, y0, focal_x, focal_y, num_cameras=num_cameras, device=device, dtype=dtype)
return PinholeIntrinsics(width, height, params, near, far)
@classmethod
def from_fov(cls, width: int, height: int,
fov: float, fov_direction: CameraFOV = CameraFOV.VERTICAL,
x0: Optional[float] = 0., y0: Optional[float] = 0.,
near: float = DEFAULT_NEAR, far: float = DEFAULT_FAR,
num_cameras: int = 1,
device: Union[torch.device, str] = None,
dtype: torch.dtype = default_dtype) -> PinholeIntrinsics:
"""
Constructs a new instance of PinholeIntrinsics from field of view
Args:
width (int): width of the camera resolution
height (int): height of the camera resolution
fov (float): the field of view, in radians
fov_direction (optional, CameraFOV): the direction of the field-of-view
x0 (optional, float): horizontal offset from origin of the image plane (by default the center). Default: 0.
y0 (optional, float): vertical offset origin of the image place (by default the center). Default: 0.
near (optional, float):
near clipping plane, define the min depth of the view frustrum
or to normalize the depth values. Default: 1e-2
far (optional, float):
far clipping plane, define the max depth of teh view frustrum
or to normalize the depth values. Default: 1e2
num_cameras (optional, int): the numbers of camera in this object. Default: 1
device (optional, str): the device on which parameters will be allocated. Default: cpu
dtype (optional, str): the dtype on which parameters will be alloacted. Default: torch.float
Returns:
(PinholeInstrinsics): the constructed pinhole camera intrinsics
"""
assert fov_direction in (CameraFOV.HORIZONTAL, CameraFOV.VERTICAL),\
"fov direction must be vertical or horizontal"
tanHalfAngle = np.tan(fov / 2.0)
aspectScale = width / 2.0 if fov_direction is CameraFOV.HORIZONTAL else height / 2.0
focal = aspectScale / tanHalfAngle
params = cls._allocate_params(x0, y0, focal, focal, num_cameras=num_cameras, device=device, dtype=dtype)
return PinholeIntrinsics(width, height, params, near, far)
def perspective_matrix(self) -> torch.Tensor:
r"""Constructs a matrix which performs perspective projection from camera space to homogeneous clip space.
The perspective matrix embeds the pinhole camera intrinsic parameters,
which together with the near / far clipping planes specifies how the view-frustum should be transformed
into a cuboid-shaped space. The projection does not affect visibility of objects,
but rather specifies how the 3D world should be down-projected to a 2D image.
This matrix does not perform clipping and is not concerned with NDC coordinates, but merely
describes the perspective transformation itself.
This leaves this matrix free from any api specific conventions of the NDC space.
When coupled with :func:`ndc_matrix()`, the combination of these two matrices produces a complete perspective
transformation from camera space to NDC space, which by default is aligned to traditional OpenGL standards.
See also :func:`projection_matrix()`, which produces a squashed matrix of these two operations together.
The logic essentially builds an torch autodiff compatible equivalent of the following tensor:
.. math::
\text{perspective_matrix} = \begin{bmatrix}
\text{focal_x} & 0. & -x0 & 0. \\
0. & \text{focal_y} & -y0 & 0. \\
0. & 0. & 0. & 1. \\
0. & 0. & 1. & 0.
\end{bmatrix}
which is a modified form of the intrinsic camera matrix:
.. math::
\begin{bmatrix}
\text{focal_x} & 0. & x0 \\
0. & \text{focal_y} & y0 \\
0. & 0. & 1.
\end{bmatrix}
Returns:
(torch.Tensor): The perspective matrix, of shape :math:`(\text{num_cameras}, 4, 4)`
"""
zero = torch.zeros_like(self.focal_x)
one = torch.ones_like(self.focal_x)
rows = [
torch.stack([self.focal_x, zero, -self.x0, zero], dim=-1),
torch.stack([zero, self.focal_y, -self.y0, zero], dim=-1),
torch.stack([zero, zero, zero, one], dim=-1),
torch.stack([zero, zero, one, zero], dim=-1)
]
persp_mat = torch.stack(rows, dim=1)
return persp_mat
    def ndc_matrix(self, left, right, bottom, top, near, far) -> torch.Tensor:
        r"""Constructs a matrix which performs the required transformation to project the scene onto the view frustum.

        (that is: it normalizes a cuboid-shaped view-frustum to clip coordinates, which are
        SCALED normalized device coordinates).
        When used in conjunction with a :func:`perspective_matrix()`, a transformation from camera view space to
        clip space can be obtained.

        .. seealso::

            projection_matrix() which combines both operations.

        .. note::

            This matrix actually converts coordinates to clip space, and requires an extra division by the w
            coordinates to obtain the NDC coordinates. However, it is named **ndc_matrix** as the elements are chosen
            carefully according to the definitions of the NDC space.

        Vectors transformed by this matrix will reside in the kaolin clip space,
        which is left handed (depth increases in the direction that goes inwards the screen)::

            Y      Z
            ^    /
            |  /
            |---------> X

        The final NDC coordinates can be obtained by dividing each vector by its w coordinate
        (perspective division).

        !! NDC matrices depend on the choice of NDC space, and should therefore be chosen accordingly !!

        The ndc matrix is a composition of 2 matrices which define the view frustum:

        .. math::

            ndc &= Ortho \times Depth Scale \\
            &= \begin{bmatrix}
                2. / (r - l) & 0. & 0. & tx \\
                0. & 2. / (t - b) & 0. & ty \\
                0. & 0. & -2. / (\text{far} - \text{near}) & tz \\
                0. & 0. & 0. & 1.
            \end{bmatrix}
            \times \begin{bmatrix}
                1. & 0. & 0. & 0. \\
                0. & 1. & 0. & 0. \\
                0. & 0. & B & A \\
                0. & 0. & 0. & 1.
            \end{bmatrix} \\
            &= \begin{bmatrix}
                2. / (r - l) & 0. & 0. & -tx \\
                0. & 2. / (t - b) & 0. & -ty \\
                0. & 0. & U & V \\
                0. & 0. & 0. & -1.
            \end{bmatrix}

        - **n**, **f**: are the near and far clipping planes,
          which define the min / max depth of the view frustum.
          The near and far planes are also used to normalize the depth values to
          normalized device coordinates.
        - **r**, **l**, **t**, **b**: are the right, left, top and bottom borders of the
          view frustum, and are defined by the perspective
          fov (derived from the focal length) and image plane dimensions.
        - **tx**, **ty**, **tz**: are defined as:
          :math:`tx = -(r + l) / (r - l)`
          :math:`ty = -(t + b) / (t - b)`
          :math:`tz = -(f + n) / (f - n)`
        - **U**, **V**: are elements which define the NDC range.
        - **A**, **B**: can be reverse engineered from U, V and are uniquely defined by them
          (and in fact serve a similar function).

        Input values are determined by the screen dimensions and intrinsic coordinate conventions,
        for example:

        1) :math:`(\text{left}=0, \text{right}=\text{width}, \text{bottom}=\text{height}, \text{top}=0)`
           for origin at top-left of the screen, y axis pointing downwards.
        2) :math:`(\text{left}=-\dfrac{\text{width}}{2}, \text{right}=\dfrac{\text{width}}{2},
           \text{bottom}=-\dfrac{\text{height}}{2}, \text{top}=\dfrac{\text{height}}{2})`
           for origin at center of the screen, and y axis pointing upwards.

        Args:
            left (float): location of the left face of the view-frustum.
            right (float): location of the right face of the view-frustum.
            bottom (float): location of the bottom face of the view-frustum.
            top (float): location of the top face of the view-frustum.
            near (float):
                location of the near face of the view-frustum.
                Should always be larger than zero and smaller than the far clipping plane.
                If used in conjunction with a perspective matrix,
                the near clipping plane should be identical for both.
            far (float):
                location of the far face of the view-frustum.
                Should always be larger than the near clipping plane.
                If used in conjunction with a perspective matrix,
                the far clipping plane should be identical for both.

        Returns:
            (torch.Tensor): the ndc matrix, of shape :math:`(1, 4, 4)`.
        """
        tx = -(right + left) / (right - left)
        ty = -(top + bottom) / (top - bottom)
        # tz = -(far + near) / (far - near) # Not used explicitly here, but makes easier to follow derivations
        # Some examples of U,V choices to control the depth of the NDC space obtained:
        # ------------------------------------------------------------------------------------------------------
        # |  NDC in [-1, 1]    |   U = -2.0 * near * far / (far - near)   |   i.e. OpenGL NDC space
        # |                    |   V = -(far + near) / (far - near)       |
        # |                    |                                          |
        # ------------------------------------------------------------------------------------------------------
        # |  NDC in [1, 0]     |   U = (near * far) / (far - near)        |   Reverse depth for better fp precision
        # |                    |   V = near / (far - near)                |
        # |                    |                                          |
        # ------------------------------------------------------------------------------------------------------
        # |  NDC in [0, 1]     |   U = (near * far) / (near - far)        |
        # |                    |   V = far / (far - near)                 |
        # |                    |                                          |
        # ------------------------------------------------------------------------------------------------------
        # Why? Vectors coming from camera space are first multiplied by the perspective matrix:
        # (they're assumed to be homogeneous, where w = 1)
        #   [focal_x, 0.0,     -x0,  0.0]   @  [ x ]  =  [ ... ]
        #   [0.0,     focal_y, -y0,  0.0]      [ y ]     [ ... ]
        #   [0.0,     0.0,     0.0,  1.0]      [ z ]     [  1  ]
        #   [0.0,     0.0,     1.0,  0.0]      [ 1 ]     [  z  ]
        #
        # and next we convert them to clip space by using the matrix calculated below:
        #   [2.0 / (r - l), 0.0,           0.0,  -tx  ]  @  [ ... ]  =  [   ..   ]
        #   [0.0,           2.0 / (t - b), 0.0,  -ty  ]     [ ... ]     [   ..   ]
        #   [0.0,           0.0,           U,     V   ]     [  1  ]     [ U + Vz ]
        #   [0.0,           0.0,           0.0,  -1.0 ]     [  z  ]     [   -z   ]
        #
        # the last step to move from clip space to ndc space involves perspective division: we divide by w.
        #   [   ..   ]   / (-z)       [    ..     ]
        #   [   ..   ]  =========>    [    ..     ]
        #   [ U + Vz ]  persp. div    [ -U/z - V  ]
        #   [   -z   ]                [     1     ]
        #
        # And we obtain:
        #   z_ndc = -U / z - V
        #
        # We want to map specific values of z, the near and far planes, to specific NDC values
        # (for example, such that near --> -1,  far --> 1 ).
        #
        # kaolin assumes a left handed NDC space (depth goes inwards the screen), so we substitute
        # z = -near and z = -far in the equation above, paired with the requested z_ndc values.
        # A simple linear system of 2 equations is obtained, that once solved, yields U and V.
        #   -1 = -U / (-n) - V
        #    1 = -U / (-f) - V
        if self.ndc_min == -1 and self.ndc_max == 1:
            U = -2.0 * near * far / (far - near)
            V = -(far + near) / (far - near)
        elif self.ndc_min == 0 and self.ndc_max == 1:
            U = (near * far) / (near - far)
            V = far / (far - near)
        elif self.ndc_min == 1 and self.ndc_max == 0:
            U = (near * far) / (far - near)
            V = near / (far - near)
        else:
            raise NotImplementedError('Perspective Projection does not support NDC range of '
                                      f'[{self.ndc_min}, {self.ndc_max}]')
        # The matrix is non differentiable, as NDC coordinates are a fixed standard set by the graphics api
        ndc_mat = self.params.new_tensor([
            [2.0 / (right - left), 0.0,                  0.0, -tx ],
            [0.0,                  2.0 / (top - bottom), 0.0, -ty ],
            [0.0,                  0.0,                  U,    V  ],
            [0.0,                  0.0,                  0.0, -1.0]
        ], dtype=self.dtype)
        # Add batch dim, to allow broadcasting
        return ndc_mat.unsqueeze(0)
def projection_matrix(self) -> torch.Tensor:
r"""Creates an OpenGL compatible perspective projection matrix to clip coordinates.
This is the default perspective projection matrix used by kaolin: it assumes the NDC origin is at the
center of the canvas (hence x0, y0 offsets are measured relative to the center).
Return:
(torch.Tensor): the projection matrix, of shape :math:`(\text{num_cameras}, 4, 4)`
"""
# Obtain perspective projection matrix to non-NDC coordinates
# The axis-skew is assumed to be negligible (m01 of the matrix is zero)
persp_matrix = self.perspective_matrix()
# Compute view frustum components, for conversion to clip / NDC coordinates.
# By default, kaolin follows OpenGL conventions of NDC in [-1, 1],
# where the center of the canvas is denoted as (0, 0)
# The following lines ensure the projection matrix is compatible with OpenGL.
# Practitioners using a different graphics api may modify this matrix.
top = self.height / 2
bottom = -top
right = self.width / 2
left = -right
ndc = self.ndc_matrix(left, right, bottom, top, self.near, self.far)
# Squash matrices together to form complete perspective projection matrix which maps to NDC coordinates
proj = ndc @ persp_matrix
return proj
def project(self, vectors: torch.Tensor) -> torch.Tensor:
r"""
Applies perspective projection to obtain Clip Coordinates
(this function does not perform perspective division the actual Normalized Device Coordinates).
Assumptions:
* Camera is looking down the negative "z" axis
(that is: camera forward axis points outwards from screen, OpenGL compatible).
* Practitioners are advised to keep near-far gap as narrow as possible,
to avoid inherent depth precision errors.
Args:
vectors (torch.Tensor):
the vectors to be transformed,
can homogeneous of shape :math:`(\text{num_vectors}, 4)`
or :math:`(\text{num_cameras}, \text{num_vectors}, 4)`
or non-homogeneous of shape :math:`(\text{num_vectors}, 3)`
or :math:`(\text{num_cameras}, \text{num_vectors}, 3)`
Returns:
(torch.Tensor): the transformed vectors, of same shape as ``vectors`` but, with homogeneous coordinates,
where the last dim is 4
"""
proj = self.projection_matrix()
# Expand input vectors to 4D homogeneous coordinates if needed
homogeneous_vecs = up_to_homogeneous(vectors)
num_cameras = len(self) # C - number of cameras
batch_size = vectors.shape[-2] # B - number of vectors
v = homogeneous_vecs.expand(num_cameras, batch_size, 4)[..., None] # Expand as (C, B, 4, 1)
proj = proj[:, None].expand(num_cameras, batch_size, 4, 4) # Expand as (C, B, 4, 4)
transformed_v = proj @ v
transformed_v = transformed_v.squeeze(-1) # Reshape: (C, B, 4)
return transformed_v # Return shape: (C, B, 4)
def transform(self, vectors: torch.Tensor) -> torch.Tensor:
r"""
Applies perspective projection to obtain Normalized Device Coordinates
(this function also performs perspective division).
Assumptions:
* Camera is looking down the negative z axis (that is: Z axis points outwards from screen, OpenGL compatible).
* Practitioners are advised to keep near-far gap as narrow as possible,
to avoid inherent depth precision errors.
Args:
vectors (torch.Tensor):
the vectors to be transformed,
can homogeneous of shape :math:`(\text{num_vectors}, 4)`
or :math:`(\text{num_cameras}, \text{num_vectors}, 4)`
or non-homogeneous of shape :math:`(\text{num_vectors}, 3)`
or :math:`(\text{num_cameras}, \text{num_vectors}, 3)`
Returns:
(torch.Tensor): the transformed vectors, of same shape as ``vectors`` but with non-homogeneous coords,
e.g. the last dim 3
"""
transformed_v = self.project(vectors) # Project with homogeneous coords to shape (C, B, 4)
normalized_v = down_from_homogeneous(transformed_v) # Perspective divide to shape: (C, B, 3)
return normalized_v # Return shape: (C, B, 3)
    def normalize_depth(self, depth: torch.Tensor) -> torch.Tensor:
        r"""Normalizes depth values to the NDC space defined by the view frustum.

        Args:
            depth (torch.Tensor):
                the depths to be normalized, of shape :math:`(\text{num_depths},)`
                or :math:`(\text{num_cameras}, \text{num_depths})`

        Returns:
            (torch.Tensor):
                The normalized depth values to the ndc range defined by the projection matrix,
                of shape :math:`(\text{num_cameras}, \text{num_depths})`
        """
        # Broadcast unbatched depth values over all cameras held by this object.
        if depth.ndim < 2:
            depth = depth.expand(len(self), *depth.shape)
        proj = self.projection_matrix()
        # Depth-row coefficients of the projection matrix; after the perspective
        # divide the depth maps as: ndc_depth = a - b / depth.
        a = -proj[:, 2, 2]
        b = -proj[:, 2, 3]
        # Clamp depths into the frustum's [near, far] range (min/max guard either ordering).
        depth = torch.clamp(depth, min=min(self.near, self.far), max=max(self.near, self.far))
        # Here we allow depth to be 0, as it will result in 'inf' values which torch will soon clamp.
        # If b is 0 as well, it most likely means the choice of near / far planes and ndc coordinates is invalid.
        ndc_depth = a - b / depth  # from near: ndc_min to far: ndc_max
        ndc_min = min(self.ndc_min, self.ndc_max)
        ndc_max = max(self.ndc_min, self.ndc_max)
        normalized_depth = (ndc_depth - ndc_min) / (ndc_max - ndc_min)  # from near: 0 to far: 1
        normalized_depth = torch.clamp(normalized_depth, min=0.0, max=1.0)
        return normalized_depth
    @CameraIntrinsics.width.setter
    def width(self, value: int) -> None:
        """Updates the width of the image plane.

        The fov will remain invariant, and the focal length may change instead.
        """
        # Keep the fov invariant: re-applying it after the resize recomputes focal_x.
        fov = self.fov_x
        self._shared_fields['width'] = value
        self.fov_x = fov

    @CameraIntrinsics.height.setter
    def height(self, value: int) -> None:
        """Updates the height of the image plane.

        The fov will remain invariant, and the focal length may change instead.
        """
        # Keep the fov invariant: re-applying it after the resize recomputes focal_y.
        fov = self.fov_y
        self._shared_fields['height'] = value
        self.fov_y = fov
    @property
    def x0(self) -> torch.FloatTensor:
        """The horizontal offset from the NDC origin in image space, of shape :math:`(\\text{num_cameras},)`.

        By default, kaolin defines the NDC origin at the canvas center.
        """
        return self.params[:, PinholeParamsDefEnum.x0]

    @x0.setter
    def x0(self, val: Union[float, torch.Tensor]) -> None:
        self._set_param(val, PinholeParamsDefEnum.x0)

    @property
    def y0(self) -> torch.FloatTensor:
        """The vertical offset from the NDC origin in image space, of shape :math:`(\\text{num_cameras},)`.

        By default, kaolin defines the NDC origin at the canvas center.
        """
        return self.params[:, PinholeParamsDefEnum.y0]

    @y0.setter
    def y0(self, val: Union[float, torch.Tensor]) -> None:
        self._set_param(val, PinholeParamsDefEnum.y0)
    @property
    def cx(self) -> torch.FloatTensor:
        """The principal point X coordinate, in pixels from the canvas left edge.

        Note: By default, the principal point is the canvas center
        (kaolin defines the NDC origin at the canvas center).
        """
        # Assumes the NDC x origin is at the center of the canvas
        return self.width / 2.0 + self.params[:, PinholeParamsDefEnum.x0]

    @property
    def cy(self) -> torch.FloatTensor:
        """The principal point Y coordinate, in pixels from the canvas top edge.

        Note: By default, the principal point is the canvas center
        (kaolin defines the NDC origin at the canvas center).
        """
        # Assumes the NDC y origin is at the center of the canvas
        return self.height / 2.0 + self.params[:, PinholeParamsDefEnum.y0]
    @property
    def focal_x(self) -> torch.FloatTensor:
        """The focal length on the x-axis, in pixels, of shape :math:`(\\text{num_cameras},)`."""
        return self.params[:, PinholeParamsDefEnum.focal_x]

    @focal_x.setter
    def focal_x(self, val: Union[float, torch.Tensor]) -> None:
        self._set_param(val, PinholeParamsDefEnum.focal_x)

    @property
    def focal_y(self) -> torch.FloatTensor:
        """The focal length on the y-axis, in pixels, of shape :math:`(\\text{num_cameras},)`."""
        return self.params[:, PinholeParamsDefEnum.focal_y]

    @focal_y.setter
    def focal_y(self, val: Union[float, torch.Tensor]) -> None:
        self._set_param(val, PinholeParamsDefEnum.focal_y)
    def tan_half_fov(self, camera_fov_direction: CameraFOV = CameraFOV.VERTICAL) -> torch.FloatTensor:
        r"""Returns :math:`\tan(\text{fov} / 2)`, where the fov angle is measured in radians.

        Args:
            camera_fov_direction (optional, CameraFOV):
                the leading direction of the fov. Default: vertical

        Returns:
            (torch.Tensor): tan(fov/2), of size :math:`(\text{num_cameras},)`

        Raises:
            ValueError: if ``camera_fov_direction`` is not HORIZONTAL or VERTICAL.
        """
        # tan(fov / 2) = (half image-plane extent, in pixels) / focal length
        if camera_fov_direction is CameraFOV.HORIZONTAL:
            tanHalfAngle = self.focal_x.new_tensor([self.width / 2.0]) / self.focal_x
        elif camera_fov_direction is CameraFOV.VERTICAL:
            tanHalfAngle = self.focal_y.new_tensor([self.height / 2.0]) / self.focal_y
        else:
            raise ValueError(f'Unsupported CameraFOV direction enum given to tan_half_fov: {camera_fov_direction}')
        return tanHalfAngle
def fov(self, camera_fov_direction: CameraFOV = CameraFOV.VERTICAL, in_degrees=True) -> torch.FloatTensor:
r"""The field-of-view
Args:
camera_fov_direction (CameraFOV):
the leading direction of the fov. Default: vertical
in_degrees (bool): if True return result in degrees, else in radians. Default: True
Returns:
(torch.Tensor): the field-of-view, of shape :math:`(\text{num_cameras},)`
"""
if camera_fov_direction is CameraFOV.HORIZONTAL:
x, y = self.focal_x, self.width / 2.0
elif camera_fov_direction is CameraFOV.VERTICAL:
x, y = self.focal_y, self.height / 2.0
y = x.new_tensor(y)
fov = 2 * torch.atan2(y, x)
if in_degrees:
fov = fov * 180 / np.pi
return fov
    @property
    def fov_x(self):
        """The field-of-view on the horizontal leading direction, in degrees."""
        return self.fov(CameraFOV.HORIZONTAL, in_degrees=True)

    @fov_x.setter
    def fov_x(self, angle_degs: Union[float, torch.Tensor]) -> None:
        """Sets the horizontal fov (degrees) by recomputing focal_x; the canvas width is unchanged."""
        # Accept python scalars or tensors; tensors are moved to the params' device / dtype.
        if isinstance(angle_degs, torch.Tensor):
            angle_degs = angle_degs.to(device=self.focal_x.device, dtype=self.focal_x.dtype)
        else:
            angle_degs = self.focal_x.new_tensor(angle_degs)
        fov = angle_degs / 180 * np.pi  # degrees -> radians
        tanHalfAngle = torch.tan(fov / 2.0)
        aspectScale = self.width / 2.0
        # focal = (width / 2) / tan(fov / 2)
        self.focal_x = aspectScale / tanHalfAngle
    @property
    def fov_y(self):
        """The field-of-view on the vertical leading direction, in degrees."""
        return self.fov(CameraFOV.VERTICAL, in_degrees=True)

    @fov_y.setter
    def fov_y(self, angle_degs: Union[float, torch.Tensor]) -> None:
        """Sets the vertical fov (degrees) by recomputing focal_y; the canvas height is unchanged."""
        # Accept python scalars or tensors; tensors are moved to the params' device / dtype.
        if isinstance(angle_degs, torch.Tensor):
            angle_degs = angle_degs.to(device=self.focal_y.device, dtype=self.focal_y.dtype)
        else:
            angle_degs = self.focal_y.new_tensor(angle_degs)
        fov = angle_degs / 180 * np.pi  # degrees -> radians
        tanHalfAngle = torch.tan(fov / 2.0)
        aspectScale = self.height / 2.0
        # focal = (height / 2) / tan(fov / 2)
        self.focal_y = aspectScale / tanHalfAngle
def zoom(self, amount):
r"""Applies a zoom on the camera by adjusting the lens.
Args:
amount (torch.Tensor or float):
Amount of adjustment, measured in degrees.
Mind the conventions -
To zoom in, give a positive amount (decrease fov by amount -> increase focal length)
To zoom out, give a negative amount (increase fov by amount -> decrease focal length)
"""
fov_ratio = self.fov_x / self.fov_y
self.fov_y -= amount
self.fov_x = self.fov_y * fov_ratio # Make sure the view is not distorted
|
#! /usr/bin/env python
import argparse
import pyfits
import numpy as np
import matplotlib.pyplot as plt
from target_utils import apertures_from_region
def head_append(header):
    """Fill *header* with the linear WCS-style keywords expected downstream.

    Note: mutates *header* in place and returns that same object.
    """
    keywords = {
        'CTYPE1': 'LINEAR',
        'CTYPE2': 'LINEAR',
        'CDELT1': 1,
        'CDELT2': 1,
        'CD1_1': 1,
        'CD2_2': 1,
        'DISPAXIS': 1,
        'CRPIX1': 1,
        'CRVAL1': 1,
    }
    new = header
    # Assign one key at a time (rather than update()) so insertion order is
    # preserved for header-like mappings.
    for key, value in keywords.items():
        new[key] = value
    return new
def main():
    """Extracts apertures from a flatfielded image into 1D spectral cuts (.ms.fits)."""
    parser = argparse.ArgumentParser(description='Extracts apertures into 1D spectral cuts.')
    parser.add_argument('ffimg', type=str, help='Flatfielded image file.')
    parser.add_argument('reg', type=str, help='Region file defining apertures.')
    parser.add_argument('name', type=str, help='Name of data set (e.g. NGC253).')
    args = parser.parse_args()
    apertures = apertures_from_region(args.ffimg, args.reg)
    # Collapse each 2D aperture into a 1D cut by averaging along the spatial axis.
    aps1D = [np.mean(aperture, axis=0) for aperture in apertures]
    header = pyfits.getheader(args.ffimg)
    new_head = head_append(header)
    # Flip each cut horizontally (reverse the dispersion axis).
    aps1D_final = [cut[::-1] for cut in aps1D]
    # print() as a function: the original used a Python 2 print statement,
    # which is a SyntaxError under Python 3 (this form runs on both).
    print('Writing spectral cuts to %s.ms.fits file.' % args.name)
    pyfits.writeto('%s.ms.fits' % (args.name), aps1D_final, new_head, clobber=True)


if __name__ == '__main__':
    main()
|
import streamlit as st
import base64
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# Streamlit dashboard: interactive exploration of a EURO tournament team-stats CSV.
st.set_page_config(layout="wide")
# NOTE(review): the page is titled EURO 2020 but the CSV loaded below holds
# Euro_2012 team stats — confirm which dataset is actually intended.
st.title('EURO 2020 ANALYSIS')
st.markdown("""
This app is developed by Theevagaraju to perform an analysis on EURO 2020
* **Python**: pandas,streamlit
""")
# Cache the CSV load across Streamlit reruns.
@st.cache
def load_data():
    # 'unicode_escape' decodes escaped characters in the file; the CSV is
    # expected to sit next to this script.
    data = pd.read_csv("Euro_2012_stats_TEAM.csv",encoding= 'unicode_escape')
    return data
df = load_data()
# Sidebar: multi-select filter over the teams present in the dataset.
st.sidebar.header('User Input')
sorted_unique_team = sorted(df['Team'].unique())
option = st.sidebar.multiselect('Country', sorted_unique_team)
df_option = df[(df.Team.isin(option))]  # rows for the selected countries only
st.write('Data Dimension: ' +str(df.shape[0])+' **rows** and '+str(df.shape[1]) +' **columns**')
st.write(df.head())
st.markdown("Sorted by **Goals**, **Shots On Target**, **Passing Accuracy**, and **Displinary**")
# Per-team discipline / accuracy table for the selected countries.
st.write(df_option[['Team','Goals','Shots on target','Passing Accuracy','Yellow Cards', 'Red Cards']].sort_values(['Team'], ascending = True))
# st.write(df.loc[df.Team.isin(['England', 'Italy', 'Russia']), ['Team','Shooting Accuracy']])
# st.sidebar.markdown('You have chosen : '+ option)
st.markdown("Statistics(Average) for each country on **Goals**, **Shots On Target**, **Passing Accuracy**, **Shooting Accuracy**,**Shots off target**, and **Saves made**")
# Column-wise means over the filtered selection (numeric columns only).
st.write(df_option[['Team','Goals','Shots on target','Shots off target','Saves made']].mean())
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.views.generic import TemplateView
import json
import re
import boto
from boto.s3.key import Key
from django.conf import settings
class FileView(TemplateView):
    """S3-backed file-manager view: renders a folder tree and services the
    AJAX actions (new folder, delete, rename, cut, copy, upload, link).

    NOTE(review): these functions take ``request`` (or ``list``) instead of
    ``self`` and are invoked as ``FileView.<name>(...)`` — they behave as
    static methods in practice. Every action opens a fresh boto S3 connection.
    """
    def buildTree(list, parent_id='root'):
        # Recursively turn a flat list of {'text', 'parent_id', ...} node dicts
        # into the nested 'items' structure expected by the front-end widget.
        # NOTE(review): the parameter shadows the builtin ``list``; matching
        # children by 'text' assumes node names are unique across the bucket.
        branch = []
        for li in list:
            if li['parent_id'] == parent_id:
                items = FileView.buildTree(list, li['text'])
                if items:
                    li['items'] = items
                branch.append(li)
        return branch
    def newfolder(request):
        """Create an empty key ending in '/' so S3 shows it as a folder."""
        conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        bucket = conn.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)
        key = request.POST.get('key') + '/' + request.POST.get('name') + '/'
        k = bucket.new_key(key)
        k.set_contents_from_string('')
        res = {
            'result': True,
            'message': 'New folder created successfully'
        }
        return HttpResponse(json.dumps(res), content_type="application/json")
    def deletefolder(request):
        """Delete a single file key, or every key under a folder prefix."""
        conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        bucket = conn.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)
        key = request.POST.get('key')
        type = request.POST.get('type')  # NOTE(review): shadows builtin ``type``
        if type == 'folder':
            # The loop variable rebinds ``key`` (the prefix string becomes a
            # boto Key object); safe only because bucket.list() already
            # captured the original prefix.
            for key in bucket.list(prefix=key):
                key.delete()
        else:
            k = bucket.delete_key(key)
        print(key)
        res = {
            'result': True,
            'message': 'Deleted successfully'
        }
        return HttpResponse(json.dumps(res), content_type="application/json")
    def getlink(request):
        """Return a plain public URL for a key.

        NOTE(review): expires_in=0 with query_auth=False appears to yield an
        unsigned, non-expiring URL — confirm against the boto 2.x docs.
        """
        conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        bucket = conn.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)
        key = request.POST.get('key')
        key_detail = bucket.get_key(key)
        key_url = key_detail.generate_url(0, query_auth=False, force_http=True)
        print(key_url)
        res = {
            'result': True,
            'key_url':key_url
        }
        return HttpResponse(json.dumps(res), content_type="application/json")
    def rename(request):
        """Rename a file or folder by copy-then-delete (S3 has no rename).

        For folders, every key under the old prefix has the path segment at
        ``index`` (the folder's own depth) replaced with the new name.
        """
        conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        bucket = conn.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)
        oldKey = request.POST.get('key')
        name = request.POST.get('name')
        newKey = ''
        arr = oldKey.split('/')
        # index = position of the last path segment (the part being renamed).
        index = len(arr) - 1
        for num in range(0, len(arr) - 1):
            newKey += arr[num] + '/'
        newKey += name + '/'
        # print (newKey)
        fileList = []
        if request.POST.get('type') == 'folder':
            files = bucket.list(prefix=oldKey)
            for file in files:
                fileList.append(file.name)
                # print(file.name)
            # Build the destination name for every key under the old prefix.
            newFileList = []
            for file in fileList:
                newArr = file.split('/')
                newArr[index] = name
                result = ''
                for num in range(0, len(newArr)):
                    result += newArr[num] + '/'
                result = result[:-1]
                newFileList.append(result)
            for num in range(0, len(newFileList)):
                bucket.copy_key(newFileList[num], settings.AWS_STORAGE_BUCKET_NAME, fileList[num])
                bucket.delete_key(fileList[num])
        else:
            # Single file: drop the trailing '/' that was added for folders.
            newKey = newKey[:-1]
            bucket.copy_key(newKey, settings.AWS_STORAGE_BUCKET_NAME, oldKey)
            bucket.delete_key(oldKey)
        res = {
            'result': True,
            'message': 'Rename successfully'
        }
        return HttpResponse(json.dumps(res), content_type="application/json")
    def urlify(s):
        """Replace punctuation and whitespace runs with underscores."""
        s = re.sub(r"[^\w\s]", '_', s)
        s = re.sub(r"\s+", '_', s)
        return s
    def fileupload(request):
        """Upload a file under 'key/<original name>' and make it public.

        Fails (success=False) when a key with the same name already exists.
        """
        file = request.FILES['files']
        key = request.POST.get('key')
        response = {}
        # NOTE(review): ``filename`` is computed but never used — the raw
        # file.name goes into key_name below. Confirm whether the sanitized
        # name was meant to be used.
        filename = FileView.urlify(file.name.split('.')[0])
        conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        bucket = conn.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)
        contentType = file.content_type
        key_name = key + '/' + file.name
        k = Key(bucket)
        k.key = key_name
        print(key_name)
        if not k.exists():
            key = bucket.new_key(key_name)
            key.set_contents_from_string(file.read())
            key.set_metadata('Content-Type', contentType)
            key.set_acl('public-read')
            key.make_public()
            response['success'] = True;
            response['msg'] = "Successfully Uploaded";
        else:
            response['success'] = False;
            response['msg'] = "File name already exists";
        return HttpResponse(json.dumps(response), content_type="application/json")
    def filecut(request):
        """Move a file/folder: copy to the destination prefix, then delete.

        Root folders are refused (empty JSON response).
        NOTE(review): near-duplicate of filecopy below except for the deletes.
        """
        response = {}
        if request.POST.get('type') != 'rootfolder':
            sourceKey = request.POST.get('sourceKey')
            destKey = request.POST.get('destKey')
            conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
            bucket = conn.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)
            print(sourceKey)
            # Destination keeps the source's last path segment as its name.
            arrSourceKey = sourceKey.split('/')
            destKey += '/' + arrSourceKey[len(arrSourceKey) - 1]
            print(destKey)
            fileList = []
            if request.POST.get('type') == 'folder':
                files = bucket.list(prefix=sourceKey)
                for file in files:
                    fileList.append(file.name)
                newFileList = []
                for file in fileList:
                    newArr = file.split('/')
                    result = destKey + '/' + newArr[len(newArr) - 1]
                    # print(result)
                    newFileList.append(result)
                for num in range(0, len(newFileList)):
                    print (newFileList[num])
                    print (fileList[num])
                    bucket.copy_key(newFileList[num], settings.AWS_STORAGE_BUCKET_NAME, fileList[num])
                    bucket.delete_key(fileList[num])
            else:
                bucket.copy_key(destKey, settings.AWS_STORAGE_BUCKET_NAME, sourceKey)
                bucket.delete_key(sourceKey)
            response['result'] = True
            response['message'] = "Successfully Moved"
            return HttpResponse(json.dumps(response), content_type="application/json")
        else:
            return HttpResponse(json.dumps(response), content_type="application/json")
    def filecopy(request):
        """Copy a file/folder to another prefix (same as filecut, no delete)."""
        response = {}
        if request.POST.get('type') != 'rootfolder':
            sourceKey = request.POST.get('sourceKey')
            destKey = request.POST.get('destKey')
            conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
            bucket = conn.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)
            print(sourceKey)
            arrSourceKey = sourceKey.split('/')
            destKey += '/' + arrSourceKey[len(arrSourceKey) - 1]
            print(destKey)
            fileList = []
            if request.POST.get('type') == 'folder':
                files = bucket.list(prefix=sourceKey)
                for file in files:
                    fileList.append(file.name)
                newFileList = []
                for file in fileList:
                    newArr = file.split('/')
                    result = destKey + '/' + newArr[len(newArr) - 1]
                    # print(result)
                    newFileList.append(result)
                for num in range(0, len(newFileList)):
                    print (newFileList[num])
                    print (fileList[num])
                    bucket.copy_key(newFileList[num], settings.AWS_STORAGE_BUCKET_NAME, fileList[num])
            else:
                bucket.copy_key(destKey, settings.AWS_STORAGE_BUCKET_NAME, sourceKey)
            response['result'] = True
            response['message'] = "Successfully Copied"
            return HttpResponse(json.dumps(response), content_type="application/json")
        else:
            return HttpResponse(json.dumps(response), content_type="application/json")
    def files(request):
        """List every key under the root folder and render it as a tree.

        Each path segment becomes a node dict (text/parent_id/path/icon class)
        which buildTree() then nests for the template.
        NOTE(review): this loop shadows the builtins ``str``, ``dict``,
        ``file`` and ``object`` — works, but rename on the next real edit.
        """
        conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        bucket = conn.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)
        fileList = []
        url = settings.AWS_STORAGE_BUCKET_ROOT_FOLDER+"/"
        files = bucket.list(prefix=url)
        for file in files:
            fileList.append(file.name)
        objList = []
        # Synthetic root node the rest of the tree hangs from.
        rootdict = {
            'text': settings.AWS_STORAGE_BUCKET_ROOT_FOLDER,
            'parent_id': 'root',
            'expanded': True,
            'spriteCssClass': 'rootfolder',
            'path': "public/"
        }
        objList.append(rootdict)
        for obj in fileList:
            str = obj.split("/")
            print(obj)
            depth_level = len(str)
            path = "public"
            for index in range(0, depth_level):
                if index + 1 < depth_level:
                    if str[index + 1] != '':
                        dict = {}
                        dict['text'] = str[index + 1]
                        path = path + "/" + str[index + 1]
                        dict['parent_id'] = str[index]
                        dict['path'] = path
                        dict['expanded']=True
                        # Pick a CSS icon class from the (first) extension;
                        # no '.' means it is treated as a folder.
                        file = str[index + 1].split(".")
                        if len(file) == 1:
                            dict['spriteCssClass'] = 'folder'
                        else:
                            if file[1].lower() == "png" or file[1].lower() == "jpeg" or file[1].lower() == "jpg":
                                dict['spriteCssClass'] = 'image'
                            elif file[1].lower() == "css" or file[1].lower() == "html":
                                dict['spriteCssClass'] = 'html'
                            elif file[1].lower() == "pdf":
                                dict['spriteCssClass'] = 'pdf'
                            elif file[1].lower() == "txt":
                                dict['spriteCssClass'] = 'html'
                            else:
                                dict['spriteCssClass'] = 'html'
                        # De-duplicate nodes that appear under the same parent
                        # (a prefix shared by many keys is seen repeatedly).
                        found = False
                        for object in objList:
                            if object['text'] == dict['text'] and object['parent_id'] == dict['parent_id']:
                                found = True;
                                break;
                        if not found:
                            objList.append(dict)
        ph = FileView.buildTree(objList)
        context = {
            'filelist': json.dumps(ph)
        }
        return render(request, 'file/file.html', context)
|
# file name: problem2.py
def example1():
    """Example circuit for the matrix [[0, 1], [1, 0]] (a single X gate).

    Note:
        Join multi-qubit or multi-gate circuits with ';'.
    """
    return 'X | qubits[0]'
def example2():
    """Example circuit for [[1,0,0,0],[0,1,0,0],[0,0,0,1],[0,0,1,0]] (CNOT).

    Note:
        Join multi-qubit or multi-gate circuits with ';'.
    """
    return 'CX | (qubits[0], qubits[1])'
def example3():
    """Example circuit for [[0,0,0,1],[0,0,1,0],[1,0,0,0],[0,1,0,0]].

    Note:
        Join multi-qubit or multi-gate circuits with ';'.
    """
    return 'X | qubits[0]; X | qubits[1]; CX | (qubits[0], qubits[1])'
def subproblem1():
    """Circuit for [[1, 1], [1, -1]] / sqrt(2): the Hadamard gate."""
    return 'H | qubits[0]'
def subproblem2():
    """Circuit for [[1, 1], [-1, 1]] / sqrt(2): X followed by H."""
    return 'X | qubits[0]; H | qubits[0]'
def subproblem3():
    """Circuit for [[0,0,0,1],[0,0,1,0],[0,1,0,0],[1,0,0,0]] = X (x) X.

    Applying X on both qubits maps |ab> -> |(1-a)(1-b)>, which is exactly
    this anti-diagonal matrix.
    """
    # BUG FIX: the original string ended in a stray ')' ("X | qubits[1])"),
    # which does not match the single-qubit gate syntax of the examples.
    quantum_circuit = 'X | qubits[0]; X | qubits[1]'
    return quantum_circuit
def subproblem4():
    """Circuit for [[1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]] (SWAP).

    Three alternating CNOTs implement a SWAP of the two qubits.
    """
    return 'CX | (qubits[0], qubits[1]); CX | (qubits[1], qubits[0]); CX | (qubits[0], qubits[1])'
def subproblem5():
    """Circuit for [[1,0,0,0],[0,0,0,1],[0,0,1,0],[0,1,0,0]].

    The matrix swaps |01> <-> |11> while fixing |00> and |10>: qubit 0 flips
    exactly when qubit 1 is set.  Under example2's convention
    (CX | (control, target), with qubits[0] the first bit of the basis label)
    this is a CNOT controlled by qubits[1] targeting qubits[0].
    """
    # Was an empty placeholder; filled in with the matching circuit.
    quantum_circuit = 'CX | (qubits[1], qubits[0])'
    return quantum_circuit
|
from functools import reduce
import time
def timeit(func, *args, **kwargs):
    """Time a single call of *func* with the given arguments.

    Returns:
        (result, elapsed_seconds) on success, or the string ``'Error'`` if the
        call raises (kept for backward compatibility with existing callers).
    """
    # perf_counter is monotonic and higher-resolution than time.time for
    # measuring durations; the raw-timestamp debug prints were removed.
    start = time.perf_counter()
    try:
        # Only the call itself is guarded, so timing bugs are not swallowed.
        result = func(*args, **kwargs)
    except Exception:
        return 'Error'
    elapsed = time.perf_counter() - start
    return result, elapsed
def mult(x, y):
    """Return the product of *x* and *y*."""
    product = x * y
    return product
N = 5
# N! computed two equivalent ways: via the named helper and via a lambda.
# (reduce consumes the range directly; wrapping it in list() added nothing.)
Njiecheng = reduce(mult, range(1, N + 1))
print(Njiecheng)
Njiecheng1 = reduce(lambda a, b: a * b, range(1, N + 1))
def factorial(n):
    """Recursively compute n! for non-negative integer *n* (0! = 1! = 1)."""
    return 1 if n in (0, 1) else n * factorial(n - 1)
print(timeit(factorial, 800)) |
from rest_framework import serializers
from api.models import Cabinet, Charger, SiteNavi, Collective, ProductLine, Project, AuthCenter, Interval, Crontab, Cxfb
class ChargerModelSerializer(serializers.ModelSerializer):
    """Straight ModelSerializer exposing every field of Charger."""
    class Meta:
        model = Charger
        fields = "__all__"
class AuthCenterModelSerializer(serializers.ModelSerializer):
    """Straight ModelSerializer exposing every field of AuthCenter."""
    class Meta:
        model = AuthCenter
        fields = "__all__"
class IntervalModelSerializer(serializers.Serializer):
    """Read-only serializer for interval schedules with a derived display name."""
    id = serializers.CharField()
    interval_amount = serializers.IntegerField()
    interval_measure = serializers.CharField()
    name = serializers.SerializerMethodField()
    def get_name(self, obj):
        # e.g. "5/minutes" — the amount is an int, so stringify it first.
        return str(obj.interval_amount) + "/" + obj.interval_measure
class MissionSerializer(serializers.Serializer):
    """Read-only serializer for scheduled missions.

    Flattens the related crontab into a display string plus its id, and
    derives a human-readable status from the string ``enabled`` flag.
    """
    id = serializers.CharField()
    name = serializers.CharField()
    # BUG FIX: 'type' was declared twice in the original class body; the second
    # declaration silently overwrote the first, so one declaration suffices.
    type = serializers.CharField()
    content = serializers.CharField()
    enabled = serializers.CharField()
    description = serializers.CharField()
    crontab = serializers.SerializerMethodField()
    c_id = serializers.SerializerMethodField()
    status = serializers.SerializerMethodField()
    def get_crontab(self, obj):
        # Render "minute hour day_week day_month month_year" (matches
        # CrontabModelSerializer.get_name ordering), or '' when unset.
        if obj.crontab:
            return str(obj.crontab.minute) + " " + obj.crontab.hour + " " +obj.crontab.day_week + " " \
                   +obj.crontab.day_month + " " +obj.crontab.month_year
        else:
            return ''
    def get_c_id(self, obj):
        # Id of the related crontab, or '' when the mission has none.
        if obj.crontab:
            return obj.crontab.id
        else:
            return ''
    def get_status(self, obj):
        # enabled is stored as a string flag: '1' means running.
        if obj.enabled == '1':
            return 'running'
        else:
            return 'stopped'
class CrontabModelSerializer(serializers.Serializer):
    """Read-only serializer for crontab schedules with a derived display name.

    NOTE(review): field order here is minute hour day_week day_month
    month_year — not the standard cron "m h dom mon dow" order; it matches
    MissionSerializer.get_crontab, so it appears intentional.
    """
    id = serializers.CharField()
    minute = serializers.CharField()
    hour = serializers.CharField()
    day_week = serializers.CharField()
    day_month = serializers.CharField()
    month_year = serializers.CharField()
    name = serializers.SerializerMethodField()
    def get_name(self, obj):
        return str(obj.minute) + " " + obj.hour + " " +obj.day_week + " " +obj.day_month + " " +obj.month_year
class ProjectModelSerializer(serializers.Serializer):
    """Read-only serializer for projects, flattening the related product line
    and charger (responsible person) into id/name/tel fields.

    All getters fall back to "" when the relation is unset.
    """
    id = serializers.CharField()
    p_name = serializers.CharField()
    p_desc = serializers.CharField()
    language_type = serializers.CharField()
    project_type = serializers.CharField()
    server_type = serializers.CharField()
    app_frame = serializers.CharField()
    warehouse_type = serializers.CharField()
    warehouse_url = serializers.CharField()
    deploy_path = serializers.CharField()
    conf_path = serializers.CharField()
    productline = serializers.SerializerMethodField()
    charger = serializers.SerializerMethodField()
    productline_name = serializers.SerializerMethodField()
    c_name = serializers.SerializerMethodField()
    c_tel = serializers.SerializerMethodField()
    def get_productline_name(self, obj):
        # Display name of the related product line, or "".
        if obj.productline:
            return obj.productline.name
        else:
            return ""
    def get_productline(self, obj):
        # Id of the related product line, or "".
        if obj.productline:
            return obj.productline.id
        else:
            return ""
    def get_charger(self, obj):
        # Id of the responsible person, or "".
        if obj.charger:
            return obj.charger.id
        else:
            return ""
    def get_c_name(self, obj):
        # Name of the responsible person, or "".
        if obj.charger:
            return obj.charger.name
        else:
            return ""
    def get_c_tel(self, obj):
        # Phone number of the responsible person, or "".
        if obj.charger:
            return obj.charger.tel
        else:
            return ""
class ProductLineModelSerializer(serializers.Serializer):
    """Read-only serializer for product lines, flattening the charger relation.

    NOTE(review): unlike ProjectModelSerializer, these getters do NOT guard
    against a missing charger — a product line without one raises
    AttributeError here. Confirm chargers are mandatory for product lines.
    """
    id = serializers.CharField()
    name = serializers.CharField()
    desc = serializers.CharField()
    c_id = serializers.SerializerMethodField()
    c_name = serializers.SerializerMethodField()
    c_tel = serializers.SerializerMethodField()
    c_email = serializers.SerializerMethodField()
    def get_c_id(self, obj):
        return obj.charger.id
    def get_c_name(self, obj):
        return obj.charger.name
    def get_c_tel(self, obj):
        return obj.charger.tel
    def get_c_email(self, obj):
        return obj.charger.email
class SiteNaviModelSerializer(serializers.ModelSerializer):
    """Straight ModelSerializer exposing every field of SiteNavi."""
    class Meta:
        model = SiteNavi
        fields = "__all__"
class CollectiveModelSerializer(serializers.ModelSerializer):
    """Straight ModelSerializer exposing every field of Collective."""
    class Meta:
        model = Collective
        fields = "__all__"
class CxfbSerializer(serializers.Serializer):
    """Read-only serializer for deployments, flattening the related project
    (name, id, warehouse type) and target machines.
    """
    id = serializers.CharField()
    c_name = serializers.SerializerMethodField()
    deploy_type = serializers.CharField()
    status = serializers.CharField()
    if_clean = serializers.CharField()
    if_local = serializers.CharField()
    shell = serializers.CharField()
    w_type = serializers.SerializerMethodField()
    p_id = serializers.SerializerMethodField()
    machines = serializers.SerializerMethodField()
    def get_machines(self, obj):
        # Shape expected by the front-end picker: [{'text': host, 'value': ip}].
        result = []
        for o in obj.machines.all():
            result.append({'text': o.host_name, 'value': o.ip})
        return result
    def get_p_id(self, obj):
        # Related project id as a string, or "".
        if obj.project:
            return str(obj.project.id)
        else:
            return ""
    def get_c_name(self, obj):
        # Related project name, or "".
        if obj.project:
            return obj.project.p_name
        else:
            return ""
    def get_w_type(self, obj):
        # warehouse_type '1' is rendered as 'SVB', anything else as 'Git'.
        if obj.project:
            return 'SVB' if obj.project.warehouse_type == '1' else 'Git'
        else:
            return ""
class RoomSerializer(serializers.Serializer):
    """Read-only serializer for machine rooms; charger is rendered by name."""
    id = serializers.CharField()
    no = serializers.CharField()
    name = serializers.CharField()
    address = serializers.CharField()
    machine_num = serializers.IntegerField()
    charger = serializers.SerializerMethodField()
    def get_charger(self, obj):
        return obj.charger.name
class CabinetSerializer(serializers.Serializer):
    """Read-only serializer for cabinets; room and its charger rendered by name."""
    id = serializers.CharField()
    no = serializers.CharField()
    name = serializers.CharField()
    address = serializers.CharField()
    machine_num = serializers.IntegerField()
    charger = serializers.SerializerMethodField()
    room = serializers.SerializerMethodField()
    def get_room(self, obj):
        return obj.room.name
    def get_charger(self, obj):
        # The cabinet's charger is the charger of its room.
        return obj.room.charger.name
# load svm trained model to predict cars color
import pickle
from sklearn.model_selection import train_test_split
from sklearn import svm
from matplotlib import pyplot as plt
import os
import cv2 as cv
import numpy as np
from sklearn.metrics import plot_confusion_matrix
def extract_features_hist(img_path, bins):
    """
    Calculates histogram of input color image.
    :param img_path: full absolute path to image
    :param bins: number of bins to calculate histogram
    :return: histogram as a plain Python list
    :raises FileNotFoundError: if the image cannot be read
    """
    imag = cv.imread(img_path, 1)
    if imag is None:
        # cv.imread silently returns None on a missing/unreadable file, which
        # previously surfaced later as a confusing AttributeError on .flatten().
        raise FileNotFoundError('Could not read image: {}'.format(img_path))
    # NOTE(review): range=[0, 255] gives the last bin a smaller span of raw
    # pixel values than the others; [0, 256] would make bins uniform per value.
    # Left unchanged because the trained model was fit with this binning.
    hist, bin_edges = np.histogram(imag.flatten(), bins=bins, range=[0, 255])
    return list(hist)
def scan_folder(path):
    '''
    extracts files from a specific folder
    :param path: input path of a folder
    :return: names of all regular files directly inside the folder, as a list
             (subdirectories are excluded)
    '''
    file_names = []
    for entry in os.listdir(path):
        if os.path.isfile(os.path.join(path, entry)):
            file_names.append(entry)
    return file_names
if __name__ == "__main__":
    # Rebuild the exact feature matrix used at training time, then score a
    # pickled SVM color classifier on the held-out split and plot its
    # confusion matrices.
    src_path = r'C:\Users\E17538\OneDrive - Uniper SE\Desktop\DailyActivities\FAD\ACV_Ses3\HW_Ses3\cars'
    dir_list = os.listdir(src_path)
    dest_path = r'C:\Users\E17538\OneDrive - Uniper SE\Desktop\DailyActivities\FAD\ACV_Ses3\HW_Ses3\kmean_cars'
    labels = []
    # Histogram bin count must match the one the loaded model was trained with.
    biin = 24
    print('Running to save best model for bin = {}'.format(biin))
    # calculate histogram (extract features for each car)
    # take the number of features the same as best number of centroids + 2
    # NOTE(review): dir_list and labels are re-initialized here, duplicating
    # the two assignments above — harmless but redundant.
    dir_list = os.listdir(src_path)
    labels = []
    feature_vector = []
    # One subfolder per color class; the folder name doubles as the label,
    # prepended as column 0 of each feature row.
    for fol in dir_list:
        labels.append(fol)
        n_path = os.path.join(src_path, fol)
        img_list = scan_folder(n_path)
        for img in img_list:
            car_feature = extract_features_hist(os.path.join(n_path, img), biin)
            car_feature.insert(0, fol)
            feature_vector.append(car_feature)
    # Split label column from the histogram features.
    y = [row[0] for row in feature_vector]
    X = [row[1:] for row in feature_vector]
    # Same random_state/stratify as training, so X_test matches the model's
    # original evaluation split.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1, stratify=y)
    # loaded_model = pickle.load(open('svm_color_classifier.pkl', 'rb'))
    # NOTE(review): unpickling executes arbitrary code — only load model files
    # you produced yourself.
    loaded_model = pickle.load(open('svm_color_classifier_poly.pkl', 'rb'))
    result = loaded_model.score(X_test, y_test)
    print('X_test index 0 is {}'.format(X_test[0]))
    print('result is {}'.format(result))
    # pred = loaded_model.predict(np.array([80945, 115532, 228628, 284049, 246331, 234232, 193999, 149803, 176310]).reshape(1, -1))
    # # ([3, 0, 0, 0, 1, 0, 0, 2, 0])
    # print('pred is {}'.format(pred))
    # Plot non-normalized confusion matrix
    # NOTE(review): plot_confusion_matrix was removed in scikit-learn 1.2 —
    # newer versions need ConfusionMatrixDisplay.from_estimator instead.
    titles_options = [("Confusion matrix, without normalization", None),
                      ("Normalized confusion matrix", 'true')]
    for title, normalize in titles_options:
        disp = plot_confusion_matrix(loaded_model, X_test, y_test,
                                     display_labels=labels,
                                     cmap=plt.cm.Blues,
                                     normalize=normalize)
        disp.ax_.set_title(title)
        print(title)
        print(disp.confusion_matrix)
    plt.show()
    # svm_color_classifier_poly.pkl  Running to save the best model for bin = 24 score=62.5%
    # svm_color_classifier_sigmoid.pkl Running to save the best model for bin = 7 score = 40%
    # svm_color_classifier_rbf.pkl Running to save the best model for bin = 24 score = 50%
    # svm_color_classifier_poly_gamma01.pkl[{'bins': 14, 'score': 0.55}] Running to save the best model for bin = 14 g=0.1
    # C=100, gamma = 1, poly score = 50% Running to save the best model for bin = 9
|
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.utils import simplejson
from django.template import RequestContext
from django.http import HttpResponseRedirect
from crawler import spider
from crawler import monitor
from crawler.models import Item
from datetime import *
import threading
# Module-level result buckets filled by the crawler threads below.
# NOTE(review): shared globals mean concurrent requests race with each other.
amazonlist = []
dangdanglist = []
jdlist = []
taobaolist = []
yhdlist = []
class AmazonThread(threading.Thread):
    """Runs the Amazon spider for one keyword; result lands in global amazonlist."""
    def __init__(self, keyword):
        self.keyword = keyword
        threading.Thread.__init__(self)
    def run (self):
        global amazonlist
        amazonlist = spider.amazon(self.keyword)
class DangdangThread(threading.Thread):
    """Runs the Dangdang spider for one keyword; result lands in global dangdanglist."""
    def __init__(self, keyword):
        self.keyword = keyword
        threading.Thread.__init__(self)
    def run (self):
        global dangdanglist
        dangdanglist = spider.dangdang(self.keyword)
class JdThread(threading.Thread):
    """Runs the JD spider for one keyword; result lands in global jdlist.

    NOTE(review): not used by search() below — JD search looks disabled.
    """
    def __init__(self, keyword):
        self.keyword = keyword
        threading.Thread.__init__(self)
    def run (self):
        global jdlist
        jdlist = spider.jd(self.keyword)
class TaobaoThread(threading.Thread):
    """Runs the Taobao spider for one keyword; result lands in global taobaolist."""
    def __init__(self, keyword):
        self.keyword = keyword
        threading.Thread.__init__(self)
    def run (self):
        global taobaolist
        taobaolist = spider.taobao(self.keyword)
class YhdThread(threading.Thread):
    """Runs the Yhd spider for one keyword; result lands in global yhdlist."""
    def __init__(self, keyword):
        self.keyword = keyword
        threading.Thread.__init__(self)
    def run (self):
        global yhdlist
        yhdlist = spider.yhd(self.keyword)
class UpdateThread(threading.Thread):
    """Refreshes the price history for a slice of tracked items, one by one."""
    def __init__(self, items):
        self.items = items
        threading.Thread.__init__(self)
    def run (self):
        # Dispatch each item to the site-specific updater; anything not
        # matched explicitly falls through to the taobao updater.
        # NOTE(review): 'jd' items also land in the taobao branch — confirm.
        for item in self.items:
            if item.types == 'amazon':
                monitor.amazon_update(item)
            elif item.types == 'dangdang':
                monitor.dangdang_update(item)
            elif item.types == 'yhd':
                monitor.yhd_update(item)
            else:
                monitor.taobao_update(item)
def search(request):
    """Search one shop or all shops for a keyword and render the results.

    filter == "all" fans out one thread per site and joins them; otherwise a
    single spider runs synchronously.  Results travel through the module-level
    *list globals.
    NOTE(review): jdlist is neither reset nor populated here even though
    JdThread exists — confirm whether JD search was intentionally disabled.
    """
    global amazonlist
    global dangdanglist
    global yhdlist
    global taobaolist
    # Reset shared result buckets for this request.
    amazonlist = []
    dangdanglist = []
    yhdlist = []
    taobaolist = []
    threads = []
    keyword = request.GET.get("search", "")
    types = request.GET.get("filter", "")
    if types == "all":
        amazon_t = AmazonThread(keyword)
        dangdang_t = DangdangThread(keyword)
        yhd_t = YhdThread(keyword)
        taobao_t = TaobaoThread(keyword)
        threads.append(amazon_t)
        threads.append(dangdang_t)
        threads.append(yhd_t)
        threads.append(taobao_t)
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    elif types == "amazon":
        amazonlist = spider.amazon(keyword)
    elif types == "dangdang":
        dangdanglist = spider.dangdang(keyword)
    elif types == "yhd":
        yhdlist = spider.yhd(keyword)
    else:
        taobaolist = spider.taobao(keyword)
    return render_to_response('result.html',{'amazonlist':amazonlist, 'dangdanglist':dangdanglist, \
        'yhdlist':yhdlist, 'taobaolist':taobaolist, 'user':request.user})
def track(request):
    """Start tracking an item for the logged-in user (AJAX endpoint).

    Legacy Python 2 view (print statements, cmp).  Responds with a JSON code:
    SUCCESS, ALREADY (item already tracked) or LOGIN (not authenticated).
    """
    if request.user.is_authenticated():
        href = request.GET['hrefs']
        print href
        types = request.GET['col']
        print types
        name = request.GET['names'].strip()
        img = request.GET['img']
        price = request.GET['price'].strip()
        # Build a zero-padded "MM.DD" stamp for today's price point.
        m = str(datetime.today().month)
        if len(m) < 2:
            m = '0' + m
        d = str(datetime.today().day)
        if len(d) < 2:
            d = '0' + d
        dates = m + '.' + d
        # Strip a leading currency symbol: first char outside '0'..'9'.
        if ( (cmp(price[0],'0')<0) or (cmp(price[0],'9')>0) ):
            price = price[1:].strip()
        try:
            # EAFP: an existing (href, user) row means it is already tracked.
            Item.objects.get(href=href,user=request.user)
            return HttpResponse(simplejson.dumps({'codes':'ALREADY'}))
        except Item.DoesNotExist:
            item = Item(types=types,href=href,name=name,img=img,user=request.user,prices=price,dates=dates)
            item.save()
            return HttpResponse(simplejson.dumps({'codes':'SUCCESS'}))
    else:
        return HttpResponse(simplejson.dumps({'codes':'LOGIN'}))
def detrack(request,item_id):
    """Stop tracking an item; always lands back on /lists (or /login).

    NOTE(review): no ownership check — any logged-in user can delete any
    item id. Confirm whether that is acceptable.
    """
    if request.user.is_authenticated():
        try:
            item = Item.objects.get(id=str(item_id))
            item.delete()
            return HttpResponseRedirect("/lists", RequestContext(request))
        except Item.DoesNotExist:
            return HttpResponseRedirect("/lists", RequestContext(request))
    else:
        return HttpResponseRedirect("/login", RequestContext(request))
def lists(request):
    """Show the user's tracked items plus items flagged as freshly updated.

    NOTE(review): updated_items is not filtered by user — other users'
    updates are shown and their flags cleared here. Confirm intent.
    """
    if request.user.is_authenticated():
        items = Item.objects.filter(user=request.user)
        updated_items = Item.objects.filter(is_updated=True)
        # Clear the one-shot "updated" flag once it has been reported.
        for item in updated_items:
            item.is_updated = False
            item.save()
        return render_to_response('lists.html',{'items':items,'updated_items':updated_items,'user':request.user})
    else:
        return render_to_response('login.html',RequestContext(request))
def realupdate(request):
    """Force-refresh prices for the user's items, three items per worker thread."""
    if request.user.is_authenticated():
        items = Item.objects.filter(user=request.user)
        num = 0
        l = len(items)
        threads = []
        # Chunk the queryset into slices of 3 and update each slice in parallel.
        while (num < l):
            update_t = UpdateThread(items[num:num+3])
            threads.append(update_t)
            update_t.start()
            num = num + 3
        for thread in threads:
            thread.join()
        updated_items = Item.objects.filter(is_updated=True)
        # Clear the one-shot "updated" flag once it has been reported.
        for item in updated_items:
            item.is_updated = False
            item.save()
        return render_to_response('lists.html',{'items':items,'updated_items':updated_items,'user':request.user})
    else:
        return render_to_response('login.html',RequestContext(request))
def detail(request,item_id):
    """Render one item's price history with its min/max for the chart.

    prices/dates are stored as comma-separated strings on the Item row;
    item.dates.decode(...) marks this as Python 2 byte-string data.
    """
    if request.user.is_authenticated():
        try:
            item = Item.objects.get(id=str(item_id))
            prices = item.prices.split(',')
            maxP = 0;
            minP = 10000000;
            # Convert each price to float in place while tracking the extremes.
            for i in range(len(prices)):
                prices[i] = float(prices[i])
                if prices[i] < minP:
                    minP = prices[i]
                if prices[i] > maxP:
                    maxP = prices[i]
            dates = item.dates.decode("utf-8").split(',')
            return render_to_response('detail.html',{'item':item,'prices':prices,'dates':dates,'maxP':maxP,
                                                     'minP':minP, 'user':request.user})
        except Item.DoesNotExist:
            return HttpResponseRedirect("/lists", RequestContext(request))
    else:
        return HttpResponseRedirect("/login", RequestContext(request))
|
from flask import Blueprint, render_template, request, jsonify, url_for, redirect
from flask_login import login_required, current_user
from service import profile, stats
from datetime import datetime
from time import strptime
import json
import config
app = Blueprint("profile", __name__, url_prefix="/profile")
@app.route("/", methods=["GET"])
# @config.kbauth.login_required
def fetch_profile():
    """Render the profile page for the signed-in user; anonymous users go to the index."""
    if not current_user.is_authenticated:
        return redirect(url_for('index'))
    user_id = current_user.id
    # print("userId = "+str(user_id))
    user_profile = profile.getUserProfile(user_id)
    records = stats.getRecordCount(user_id)
    # hireDate arrives as a "YYYY-MM-DD" style string.
    hire_parts = user_profile["data"]["hireDate"].split("-")
    return render_template('/ver2.1/pages/profile.html', userProfile=user_profile,
                           record_count=records, day=hire_parts[2],
                           month=hire_parts[1], year=hire_parts[0])
@app.route("/get", methods=["GET"])
def getProfileData():
    """Return the current user's profile, record count and split dates as JSON."""
    if not current_user.is_authenticated:
        return redirect(url_for('index'))
    user_id = current_user.id
    user_profile = profile.getUserProfile(user_id)
    hire_parts = user_profile["data"]["hireDate"].split("-")
    payload = {
        "userId": user_id,
        "userProfile": user_profile,
        "record_count": stats.getRecordCount(user_id),
        "hiredate": hire_parts,
        "day": hire_parts[2],
        "bday": user_profile["data"]["birthDay"],
        "month": hire_parts[1],
        "bmonth": user_profile["data"]["birthMonth"],
        "year": hire_parts[0],
        "byear": user_profile["data"]["birthYear"]
    }
    return json.dumps(payload)
@app.route("/edit", methods=["POST"])
def edit_profile():
    """Apply a JSON profile edit for the logged-in user and return the service reply."""
    if not current_user.is_authenticated:
        return redirect(url_for('index'))
    payload = json.loads(request.data.decode('utf-8'))
    # The user id always comes from the session, never from the client body.
    payload['userId'] = current_user.id
    msg = profile.editUserProfile(payload)
    return json.dumps(msg)
|
#!/usr/bin/env python
import rospy
import sys
import matplotlib.pyplot as plt
import numpy as np
from network_faults.msg import Network, Velocity
# Module-level state shared with the subscriber callback below.
txrx_pl = 0       # latest raw sequence/packet-loss value from the topic
txrx_td = 0       # unused here; kept for the shared global declaration
offset = 0        # first sequence number seen (zero-bases the counting)
path_data = []    # per-packet flags: 1 = dropped, 0 = received
count = 0         # next expected (offset-corrected) sequence value
pl_percent = 0.0  # running count used for the loss-percentage printout
stop = 0          # set to 1 once 200 samples have been collected
def gotdata(txrx):
    """ROS subscriber callback: derive per-packet loss flags from a counter.

    ``txrx.packet_loss`` is treated as a monotonically increasing sequence
    number; any gap between the expected local ``count`` and the received
    value is recorded as that many dropped packets (1s), and the packet that
    actually arrived is recorded as received (0).  After 200 samples the mean
    loss is printed and collection stops.
    """
    global offset, txrx_pl, txrx_td, path_data, count, pl_percent, stop
    if stop:
        return
    txrx_pl = txrx.packet_loss
    if offset == 0:
        # First message: remember the starting sequence number.
        offset = txrx_pl
    seq_val = txrx_pl - offset
    print("seq, count: %s %s" % (seq_val, count))
    if count != seq_val:
        # We missed (seq_val - count) packets; record each as a loss.
        dropped_packets = seq_val - count
        path_data.extend([1] * dropped_packets)
        # BUG FIX: pl_percent was never updated, so the running percentage
        # below always printed 0.
        pl_percent += dropped_packets
        count = seq_val
    # BUG FIX: the packet carrying this sequence number DID arrive, so it is
    # always recorded as received.  Previously, after the first gap, count was
    # left equal to seq_val and every subsequent packet was miscounted as a drop.
    path_data.append(0)
    count += 1
    print("Packet Loss Percentage: %s" % (float(pl_percent)/float(count)))
    print(len(path_data))
    if len(path_data) == 200:
        # Use the builtin sum (the original shadowed it with a local) and
        # force float division so the mean is correct under Python 2 as well.
        total_dropped = sum(path_data)
        print("Mean Percentage of Packet Loss: %s" % (float(total_dropped) / len(path_data)))
        stop = 1
# Node setup: subscribe to the network-stats topic and poll at 10 Hz until
# the callback signals (via the global `stop`) that 200 samples are in.
rospy.init_node('GetData', anonymous=True)
network_sub = rospy.Subscriber("network_stats", Network, gotdata)
rate = rospy.Rate(10.0)
while not rospy.is_shutdown():
    if stop:
        print("converting data to csv")
        path_data = np.asarray(path_data)
        path_t = np.arange(len(path_data))
        plt.plot(path_t[:], path_data[:])
        plt.xlabel('t')
        plt.ylabel('Packet Loss %')
        plt.title('Packet Loss CDF')
        # NOTE(review): no labeled artists were added, so legend() will warn.
        plt.legend()
        plt.show()
        # NOTE(review): exits with status 1 (failure) on the normal path —
        # sys.exit(0) is the conventional success code.
        sys.exit(1)
    rate.sleep()
|
import os, sys
sys.path.append(os.path.abspath(os.path.join('../..')))
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.metrics import r2_score
from scipy import stats
from numba import njit
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from bayesflow.trainers import MetaTrainer
from bayesflow.losses import kl_latent_space
from bayesflow.diagnostics import true_vs_estimated
from bayesflow.networks import Permutation
from bayesflow.models import GenerativeModel
class InvariantCouplingNet(tf.keras.Model):
    """Implements a conditional version of a sequential network.

    Stage h1 is applied to each observation independently and mean-pooled over
    the observation axis, making the output permutation-invariant in the
    observations; stage h2 maps the pooled summary (concatenated with params
    and the model indicator) to n_out values.
    """
    def __init__(self, meta, n_out):
        """
        Creates a conditional coupling net (FC neural network).
        ----------
        Arguments:
        meta  : list -- a list of dictionaries, wherein each dictionary holds parameter - value pairs for a single
                        tf.keras.Dense layer.
        n_out : int  -- number of outputs of the coupling net
        """
        super(InvariantCouplingNet, self).__init__()
        # Per-observation encoder; applied along the observation axis.
        self.h1 = Sequential([Dense(**meta['dense_h1_args']) for _ in range(meta['n_dense_h1'])])
        # Head mapping the pooled summary to the n_out coupling outputs.
        self.h2 = Sequential(
            [Dense(**meta['dense_h2_args']) for _ in range(meta['n_dense_h2'])] +
            [Dense(n_out)]
        )
    def call(self, m, params, x):
        """
        Concatenates x and y and performs a forward pass through the coupling net.
        Arguments:
        m      : tf.Tensor of shape (batch_size, n_models)    -- the one-hot-encoded model indices
        params : tf.Tensor of shape (batch_size, theta_dim)   -- the parameters theta ~ p(theta) of interest
        x      : tf.Tensor of shape (batch_size, n_obs, inp_dim) -- the conditional data of interest x
        """
        N = int(x.shape[1])
        # Tile params and m along the observation axis -> (batch, N, dim) each,
        # so they can be concatenated with x per observation.
        params_rep = tf.stack([params] * N, axis=1)
        m_rep = tf.stack([m] * N, axis=1)
        x_params_m = tf.concat([x, params_rep, m_rep], axis=-1)
        # Mean-pool over observations -> permutation-invariant summary.
        rep = tf.reduce_mean(self.h1(x_params_m), axis=1)
        rep_params_m = tf.concat([rep, params, m], axis=-1)
        out = self.h2(rep_params_m)
        return out
class ConditionalCouplingLayer(tf.keras.Model):
    """Implements a conditional version of the INN (affine coupling) block."""
    def __init__(self, meta):
        """
        Creates a conditional invertible block.
        ----------
        Arguments:
        meta : dict -- configuration with keys 'alpha' (soft-clamp scale or None),
               'n_params', 'permute', and the coupling-net configs 's_args'/'t_args'
               (each a dict of tf.keras.Dense layer settings). All coupling nets
               are assumed to be equal.
        """
        super(ConditionalCouplingLayer, self).__init__()
        self.alpha = meta['alpha']
        theta_dim = meta['n_params']
        # Split theta into two halves; the second half takes the extra
        # dimension when theta_dim is odd.
        self.n_out1 = theta_dim // 2
        self.n_out2 = theta_dim // 2 if theta_dim % 2 == 0 else theta_dim // 2 + 1
        if meta['permute']:
            self.permutation = Permutation(theta_dim)
        else:
            self.permutation = None
        # Scale (s) and translation (t) networks for each half.
        self.s1 = InvariantCouplingNet(meta['s_args'], self.n_out1)
        self.t1 = InvariantCouplingNet(meta['t_args'], self.n_out1)
        self.s2 = InvariantCouplingNet(meta['s_args'], self.n_out2)
        self.t2 = InvariantCouplingNet(meta['t_args'], self.n_out2)
    def call(self, m, params, x, inverse=False, log_det_J=True):
        """
        Implements both directions of a conditional invertible block.
        ----------
        Arguments:
        m : tf.Tensor of shape (batch_size, n_models) -- the one-hot-encoded model indices
        params : tf.Tensor of shape (batch_size, theta_dim) -- the parameters theta ~ p(theta|y) of interest
        x : tf.Tensor -- the conditional data of interest
        inverse : bool -- flag indicating whether to run the block forward or backwards
        log_det_J : bool -- flag indicating whether to return the log determinant of the Jacobian matrix
        ----------
        Returns:
        (v, log_det_J) : (tf.Tensor of shape (batch_size, inp_dim), tf.Tensor of shape (batch_size, )) --
                        the transformed input and the log-Jacobian of the transformation,
                        if inverse = False
        u : tf.Tensor of shape (batch_size, inp_dim) -- the transformed output, if inverse = True
        """
        # --- Forward pass --- #
        if not inverse:
            if self.permutation is not None:
                params = self.permutation(params)
            u1, u2 = tf.split(params, [self.n_out1, self.n_out2], axis=-1)
            # Pre-compute network outputs for v1
            s1 = self.s1(m, u2, x)
            # Clamp s1 if specified (soft atan clamp keeps exp(s1) bounded)
            if self.alpha is not None:
                s1 = (2. * self.alpha / np.pi) * tf.math.atan(s1 / self.alpha)
            t1 = self.t1(m, u2, x)
            v1 = u1 * tf.exp(s1) + t1
            # Pre-compute network outputs for v2
            s2 = self.s2(m, v1, x)
            # Clamp s2 if specified
            if self.alpha is not None:
                s2 = (2. * self.alpha / np.pi) * tf.math.atan(s2 / self.alpha)
            t2 = self.t2(m, v1, x)
            v2 = u2 * tf.exp(s2) + t2
            v = tf.concat((v1, v2), axis=-1)
            if log_det_J:
                # log|J| = log(prod(diag(J))) -> according to inv architecture
                return v, tf.reduce_sum(s1, axis=-1) + tf.reduce_sum(s2, axis=-1)
            return v
        # --- Inverse pass --- #
        else:
            v1, v2 = tf.split(params, [self.n_out1, self.n_out2], axis=-1)
            # Pre-Compute s2 (inverse must undo the forward steps in reverse order)
            s2 = self.s2(m, v1, x)
            # Clamp s2 if specified
            if self.alpha is not None:
                s2 = (2. * self.alpha / np.pi) * tf.math.atan(s2 / self.alpha)
            u2 = (v2 - self.t2(m, v1, x)) * tf.exp(-s2)
            # Pre-Compute s1
            s1 = self.s1(m, u2, x)
            # Clamp s1 if specified
            if self.alpha is not None:
                s1 = (2. * self.alpha / np.pi) * tf.math.atan(s1 / self.alpha)
            u1 = (v1 - self.t1(m, u2, x)) * tf.exp(-s1)
            u = tf.concat((u1, u2), axis=-1)
            if self.permutation is not None:
                u = self.permutation(u, inverse=True)
            return u
class InvariantBayesFlow(tf.keras.Model):
    """Implements a chain of conditional invertible blocks for Bayesian parameter inference."""

    def __init__(self, meta):
        """
        Creates a chain of cINN blocks and chains operations.
        ----------
        Arguments:
        meta : dict -- configuration; must contain 'n_coupling_layers',
               'n_params' (latent/parameter dimensionality) and 'n_models'.
        """
        super(InvariantBayesFlow, self).__init__()
        self.cINNs = [ConditionalCouplingLayer(meta) for _ in range(meta['n_coupling_layers'])]
        self.z_dim = meta['n_params']
        self.n_models = meta['n_models']

    def call(self, m, params, x, inverse=False):
        """
        Performs one pass through the invertible chain (either inverse or forward).
        ----------
        Arguments:
        m : tf.Tensor of shape (batch_size, n_models) -- the one-hot-encoded model indices
        params : tf.Tensor of shape (batch_size, inp_dim) -- the parameters theta ~ p(theta|x) of interest
        x : tf.Tensor of shape (batch_size, summary_dim) -- the conditional data x
        inverse : bool -- flag indicating whether to run the chain forward or backwards
        ----------
        Returns:
        (z, log_det_J) if inverse = False -- the transformed input and the total
                       log-Jacobian of the transformation
        params         if inverse = True  -- the back-transformed latent draw
        """
        if inverse:
            return self.inverse(m, params, x)
        else:
            return self.forward(m, params, x)

    def forward(self, m, params, x):
        """Performs a forward pass through the chain, accumulating log|det J|."""
        z = params
        log_det_Js = []
        for cINN in self.cINNs:
            z, log_det_J = cINN(m, z, x)
            log_det_Js.append(log_det_J)
        # Sum Jacobian determinants for all blocks to obtain total Jacobian.
        log_det_J = tf.add_n(log_det_Js)
        return z, log_det_J

    def inverse(self, m, z, x):
        """Performs a reverse pass through the chain (layers applied in reverse)."""
        params = z
        for cINN in reversed(self.cINNs):
            params = cINN(m, params, x, inverse=True)
        return params

    def sample(self, x, m, n_samples, to_numpy=True):
        """
        Samples from the approximate posterior given a single instance x.
        ----------
        Arguments:
        x : tf.Tensor of shape (n_obs, x_dim) -- the conditioning data of interest
        m : int -- the integer model index
        n_samples : int -- number of samples to obtain from the approximate posterior
        to_numpy : bool -- flag indicating whether to return the samples as a np.array or a tf.Tensor
        ----------
        Returns:
        theta_samples : tf.Tensor or np.array of shape (n_samples, theta_dim)
        """
        # Represent the model index as a one-hot vector, repeated per sample.
        m_oh = tf.stack([tf.keras.utils.to_categorical(m, self.n_models)] * n_samples, axis=0)
        # Draw latent normals in parallel and push them backwards through the chain.
        z_normal_samples = tf.random.normal(shape=(n_samples, self.z_dim), dtype=tf.float32)
        theta_samples = self.inverse(m_oh, z_normal_samples, tf.stack([x] * n_samples, axis=0))
        if to_numpy:
            return theta_samples.numpy()
        # Fixed: the original line carried a stray ' |' concatenation artifact
        # that made the module a syntax error.
        return theta_samples
from typing import Optional
from fastapi import FastAPI
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles

# Application entry point: serves static assets and Jinja2 templates for the
# Crypto front-end.  Directory paths are relative to the process working dir.
app = FastAPI()
app.mount("/static", StaticFiles(directory="Crypto/static"), name="static")
# Fixed: the original line ended with a stray ' |' concatenation artifact
# that made the module a syntax error.
templates = Jinja2Templates(directory="Crypto/templates")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
class PhysiologicalPlotsCreator:
    """Creates plots of physiological data located under *baseFolder*."""
    def __init__(self, baseFolder):
        # After this you can choose what plots to create in the main method.
        # baseFolder: root directory containing the physiological data files.
        self.baseFolder = baseFolder
## TODO: decide what this class should do and implement the plot-creation methods.
#!/usr/bin/env python3
# import json
# import sys
import requests
import datetime
import urllib
import time
import subprocess
import os
from os.path import expanduser
import logging
# import pysnooper
###
#
# ToDo: Add probing macro generation
# Add file interaction on Duet (read/write)
# Calibrate then print
# Slice while calibrating and then print
# ??? Something computer vision ???
# Convert to real logging
# Create a printer object
# Config file
#
# --- Printer / network configuration ---
ip = '192.168.1.88'
baseurl = 'http://' + ip + '/'
targetdir = '/timelapse'  # photo directory under $HOME
# --- Plain-text duet log used by duet_logger ---
home = expanduser("~")
output = home + '/duetlog'
# NOTE(review): file handle opened at import time and never closed; it is
# held for the life of the process.
log = open(output, 'a')
# --- logging setup: WARNING+ to the duetlog file, CRITICAL to stdout ---
# NOTE(review): the logger's own level is never set, so debug/info records
# are dropped (effective level WARNING) -- confirm this is intended.
logger = logging.getLogger('duet-log')
logfile = logging.FileHandler(output)
logfile.setLevel(logging.WARNING)
stdout = logging.StreamHandler()
stdout.setLevel(logging.CRITICAL)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logfile.setFormatter(formatter)
stdout.setFormatter(formatter)
logger.addHandler(logfile)
logger.addHandler(stdout)
# 13-point delta probing macro: 6 outer points, 6 inner points, then the
# centre probe with S-1 (auto-calibrate 6 factors); the final G1 parks the head.
probe = '''G30 P0 X-87.60 Y-53.00 Z-99999 ; X tower
G30 P1 X0.00 Y-102.00 Z-99999 ; between X-Y towers
G30 P2 X85.60 Y-49.00 Z-99999 ; Y tower
G30 P3 X82.60 Y51.00 Z-99999 ; between Y-Z towers
G30 P4 X1.00 Y101.00 Z-99999 ; Z tower
G30 P5 X-88.60 Y53.00 Z-99999 ; between Z-X towers
G30 P6 X-43.30 Y-25.00 Z-99999 ; X tower
G30 P7 X0.00 Y-50.00 Z-99999 ; between X-Y towers
G30 P8 X43.30 Y-25.00 Z-99999 ; Y tower
G30 P9 X43.30 Y25.00 Z-99999 ; between Y-Z towers
G30 P10 X0.00 Y50.00 Z-99999 ; Z tower
G30 P11 X-43.30 Y25.00 Z-99999 ; between Z-X towers
G30 P12 X0 Y0 Z-99999 S-1 ; center and auto-calibrate 6 factors
G1X0.00 Y-102.00 Z2'''
def l_d(var):
    """
    Build two debug strings describing a variable's value and type.

    Arguments:
    var : any -- the value to describe.

    Returns:
    (print_value, return_value) :
        print_value  -- newline-separated, suited for printing
        return_value -- comma-separated single line, suited for log records
    """
    var_type = type(var)
    # Fix: the original used one template with only two '{}' placeholders and
    # passed the separator strings as extra .format() arguments, which
    # str.format silently ignores -- so both results came out identical.
    print_value = 'V: {}\nT: {}\n'.format(var, var_type)
    return_value = 'V: {}, T: {}'.format(var, var_type)
    return print_value, return_value
def get_state():
    """
    Poll the Duet status endpoint, retrying until a response arrives.

    Returns:
    (duet, status) -- the decoded JSON dict and the raw requests.Response.
    """
    # Fix: in the original, a failed request left 'status' unbound and the
    # code fell through to status.json(), raising NameError instead of
    # retrying.  Loop until the request succeeds.
    while True:
        try:
            status = requests.get(baseurl + 'rr_status?type=3')
            break
        except KeyboardInterrupt:
            exit(1)
        except Exception as e:
            # Fix: logger.error(e, 'ConErr') treated the tag as a %-format
            # argument; fold it into the message instead.
            logger.error('ConErr: %s', e)
            time.sleep(0.5)
    duet = status.json()
    # print('Duet is: {}\nStatus is: {}'.format(duet, status))
    return duet, status
def get_duet(item):
    """
    Return a single field from the Duet status JSON.

    Arguments:
    item : str -- key into the rr_status JSON (e.g. 'status', 'seq').

    Raises:
    KeyError -- if the field is not present in the status reply.
    """
    duet = get_state()[0]
    # Fix: the original passed a tag string as an extra positional argument,
    # which logging treats as a %-format argument and fails to render.
    logger.debug('get_duet result -- Field: %s Value: %s', item, duet[item])
    return duet[item]
def duet_logger(log_data, tag):
    """Append a timestamped '<tag>: <data>' record to the module's log file."""
    timestamp = datetime.datetime.now().isoformat()
    log.write('{}: {}: {}\n'.format(timestamp, tag, log_data))
# @pysnooper.snoop()
def wait_until_ready(sequence):
    """
    Block until the Duet leaves its busy state, then fetch its reply.

    Arguments:
    sequence : int -- the 'seq' value sampled before the g-code was sent
                      (as returned by send_gcode).

    Returns:
    The rr_reply text if the sequence number advanced and the reply is
    non-empty; otherwise None (implicit).
    """
    before = sequence
    logger.debug(str(sequence))
    # 'B' / 'P' -- presumably the Duet 'busy' and 'printing' status codes;
    # verify against the firmware documentation.
    busy = {'B', 'P'}
    while get_duet('status') in busy:
        time.sleep(.5)
    # Extra settle time after the busy flag clears.
    time.sleep(6)
    sequence = get_duet('seq')
    message = 'Sequence is now {} and was {}'.format(sequence, before)
    logger.debug(message)
    # A higher sequence number means the printer produced a new reply.
    if sequence > before:
        reply = requests.get(baseurl + 'rr_reply')
        data = reply.text
        logger.info(data)
        if data:
            return data
# @pysnooper.snoop()
def take_photo(duet):  # Compile timelapse: avconv -y -r 25 -i Prusa-%d.jpg -r 25 -vcodec copy -crf 20 -g 6 compiled.mp4
    """
    Pause the print, photograph the current layer with gphoto2, then resume.

    Arguments:
    duet : unused here; kept for interface compatibility with callers.
    """
    function = 'take_photo'
    # Fix: local 'dir' shadowed the builtin; renamed.
    photo_dir = os.environ['HOME'] + targetdir
    logger.debug('%s: pausing', function)
    wait_until_ready(send_gcode(gcoder('pause')))
    # Fix: the original logger calls passed the tag as an extra positional
    # argument, which logging treats as a %-format argument; fold it in.
    logger.debug('%s: Sent pause and taking photo of %s',
                 function, get_duet('currentLayer'))
    os.chdir(photo_dir)
    image = ' --filename=' + str(get_duet('currentLayer')) + '.jpg'
    photo = '/usr/bin/sudo /usr/bin/gphoto2 --wait-event=350ms --capture-image-and-download --force-overwrite'
    logger.info('%s: Pause sent; executing: %s %s', function, image, photo)
    # NOTE(review): shell=True with a fixed command string; acceptable while
    # the filename is a layer number, revisit if it ever becomes user input.
    command = (photo + image)
    subprocess.run(command, stdout=subprocess.DEVNULL, shell=True)
    send_gcode(gcoder('resume'))
def send_gcode(code):
    """URL-encode *code*, send it to the Duet, and return the 'seq' value
    sampled just before the request (for use with wait_until_ready)."""
    encoded = gcode_encode(code)
    sequence = get_duet('seq')
    requests.get(baseurl + 'rr_gcode?gcode=' + encoded)
    return sequence
def gcoder(word):
    """Look up the g-code string for a symbolic command name.

    Raises KeyError for unknown names.
    """
    lookup = {
        'pause': 'M226',
        'resume': 'M24',
        'more_probe': 'M558 P4 H3 I1 R1 A4 B1',
        'less_probe': 'M558 P4 H3 I1 R1 A1 B1',
        'home': 'G28',
        'autocal': 'G32',
        # Probe syntax: G30 P# X# Y# Z-99999 up to P9, then send S-1
        'probe': 'G30 P',
    }
    return lookup[word]
def gcode_encode(line):
    """Percent-encode a g-code line for use in an rr_gcode query string."""
    # Fix: the module only does 'import urllib', which does not bind the
    # 'parse' submodule; urllib.parse previously resolved only because
    # requests had imported it as a side effect.  Import it explicitly.
    import urllib.parse
    return urllib.parse.quote(line)
def warmup(material):
    """
    Print (and eventually send) bed/extruder warm-up g-code for a material.

    Arguments:
    material : str -- material name ('pla', 'petg'); unknown names fall back
               to the PLA temperatures the original hard-coded (55/195).

    Returns:
    None
    """
    # Fix: the 'material' parameter was completely ignored (always 55/195);
    # implement the lookup sketched in the commented-out material() helper.
    temps = {
        'pla': {'bed': 55, 'extruder': 195},
        'petg': {'bed': 90, 'extruder': 225},
    }
    chosen = temps.get(material, temps['pla'])
    # Fix: M190/M109 take their target temperature as an 'S' parameter;
    # "M190 55" is not valid RepRap g-code.
    bed = "M190 S{}".format(chosen['bed'])
    extruder = "M109 S{}".format(chosen['extruder'])
    print('Bed "{}", Extruder "{}"'.format(bed, extruder))
    # send_gcode(bed)
    # send_gcode(extruder)
    return
# def material(name):
#     materials = {
#         'pla': {'extruder': 195, 'bed': 55},
#         'petg': {'extruder': 225, 'bed': 90}}
#     return materials[name]
def probe_parse(results):
    """
    Parse the text reply of a G30-based probing run.

    Arguments:
    results : str -- the raw rr_reply text from the auto-calibration.

    Returns:
    (cal, probe_mean, probe_dev) where cal is a zip of (X, Y, Z) probe
    coordinates, or (None, None, None) when the reply shape is unrecognized.
    """
    spaces = results.count(' ')
    results = results.replace(',', '')
    coord_order = 'Xcoord, Ycoord, Zcoord'  # Coord format - match here - G
    # 22 spaces presumably identifies the reply produced by the 13-point
    # 'probe' macro above -- TODO confirm against an actual firmware reply.
    if spaces == 22:  # 30 P4 X-108.24 Y-62.5 Z-99999
        macro = probe
    else:
        return None, None, None
    split = results.split()
    # Z heights start at token 4; the last 7 tokens carry the mean/deviation
    # text, with the mean at index -5 and the deviation at index -1.
    Zcoords = split[4:-7]
    Zcoords = list(map(float, Zcoords))
    probe_mean = float(split[-5])
    probe_dev = float(split[-1])
    logger.debug(coord_order)
    Xcoords, Ycoords = parse_macro(macro)
    # NOTE(review): zip() yields a single-use iterator; callers must not
    # consume it twice.
    cal = zip(Xcoords, Ycoords, Zcoords)
    # for line in list(cal):
    #     log_and_print(line, 'parse-coords-xyz')
    # data = "Z: {}, Mean: {}, Deviation: {}".format(Zcoords, probe_mean, probe_dev)
    return cal, probe_mean, probe_dev
def parse_macro(macro):
    """
    Extract the X and Y probe coordinates from a G30 probing macro.

    Arguments:
    macro : str -- newline-separated lines like
            'G30 P4 X-108.24 Y-62.5 Z-99999 ; comment'

    Returns:
    (Xcoords, Ycoords) -- two parallel lists of floats.
    """
    Xcoords = []
    Ycoords = []
    for entry in macro.split('\n'):
        fields = entry.split()
        Xcoords.append(float(fields[2][1:]))  # strip the leading 'X'
        Ycoords.append(float(fields[3][1:]))  # strip the leading 'Y'
    return Xcoords, Ycoords
|
import sys
import caffe
if __name__ == '__main__':
    # Usage: script.py <solver.prototxt> <max_steps> [solver_state]
    solver_prototxt = sys.argv[1]
    max_steps = int(sys.argv[2])
    solver = caffe.SGDSolver(solver_prototxt)
    if len(sys.argv) > 3:
        # Optionally resume training from a saved .solverstate snapshot.
        solver_state = sys.argv[3]
        solver.restore(solver_state)
    # One solver iteration per loop pass.
    for i in range(0, max_steps):
        solver.step(1)
|
from rest_framework import routers
from csvapp.views import CSVview
# Register CSVview at the API root.  NOTE(review): no basename is given, so
# DRF derives it from the view's queryset -- confirm CSVview defines one,
# otherwise registration raises at startup.
router = routers.SimpleRouter()
router.register(r'',CSVview)
|
"""
Class for plotting a aircraft
Author: Josue H. F. Andrade
Based on: Daniel Ingram (daniel-s-ingram)
"""
from math import cos, sin
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FixedLocator, FormatStrFormatter
import matplotlib, time
class Aircraft():
    """Plots an aircraft's pose (position plus roll/pitch/yaw) in 3D.

    The fuselage outline is described by eight homogeneous body-frame points
    p1..p8; plot() renders a surrounding mesh point cloud loaded from an
    external ASCII file (MESH_PATH), transformed by the current pose.
    """

    # Mesh point cloud rendered by plot(); loaded once per instance and cached.
    MESH_PATH = '/home/josuehfa/PFC/Mesh_3.asc'

    def __init__(self, x=0, y=0, z=0, roll=0, pitch=0, yaw=0, size=1.0, show_animation=True):
        # Outline points of the aircraft in the body frame (homogeneous coords).
        self.p1 = np.array([(size-0.1)/2, 0, 0, 1]).T
        self.p2 = np.array([0, (size/2), 0, 1]).T
        self.p3 = np.array([-(size/2), (size/6), 0, 1]).T
        self.p4 = np.array([-(size+0.2)/2, 0, 0, 1]).T
        self.p5 = np.array([-(size+0.2)/2, 0, (size/6), 1]).T
        self.p6 = np.array([-(size/2), -(size/6), 0, 1]).T
        self.p7 = np.array([0, -(size/2), 0, 1]).T
        self.p8 = np.array([-(size/2), 0, 0, 1]).T
        # Trajectory history, appended on every update_pose() call.
        self.x_data = []
        self.y_data = []
        self.z_data = []
        # Lazily-loaded mesh cache; the original re-read the CSV every frame.
        self._mesh_points = None
        self.show_animation = show_animation
        if self.show_animation:
            plt.ion()
            fig = plt.figure()
            # for stopping simulation with the esc key.
            fig.canvas.mpl_connect('key_release_event',
                    lambda event: [exit(0) if event.key == 'escape' else None])
            self.ax = fig.add_subplot(111, projection='3d')
        self.update_pose(x, y, z, roll, pitch, yaw)

    def update_pose(self, x, y, z, roll, pitch, yaw):
        """Store the new pose, append it to the trajectory, and redraw."""
        self.x = x
        self.y = y
        self.z = z
        self.roll = roll
        self.pitch = pitch
        self.yaw = yaw
        self.x_data.append(x)
        self.y_data.append(y)
        self.z_data.append(z)
        if self.show_animation:
            self.plot()

    def transformation_matrix(self):
        """Return the 3x4 homogeneous transform [R | t] for the current pose.

        R is the standard ZYX (yaw-pitch-roll) rotation matrix.  Fix: the
        original last diagonal entry was cos(pitch)*cos(yaw); the correct
        term is cos(pitch)*cos(roll).
        """
        x, y, z = self.x, self.y, self.z
        cr, sr = cos(self.roll), sin(self.roll)
        cp, sp = cos(self.pitch), sin(self.pitch)
        cy, sy = cos(self.yaw), sin(self.yaw)
        return np.array(
            [[cy * cp, -sy * cr + cy * sp * sr, sy * sr + cy * sp * cr, x],
             [sy * cp, cy * cr + sy * sp * sr, -cy * sr + sy * sp * cr, y],
             [-sp, cp * sr, cp * cr, z]
             ])

    def _load_mesh(self):
        """Read the mesh file once (cm -> m) and cache the homogeneous points."""
        if self._mesh_points is None:
            import csv
            points = []
            with open(self.MESH_PATH) as mesh_file:
                for row in csv.reader(mesh_file, delimiter=','):
                    points.append(np.array(
                        [float(row[0])/100, float(row[1])/100, float(row[2])/100, 1]).T)
            self._mesh_points = points
        return self._mesh_points

    def plot(self):  # pragma: no cover
        """Redraw the mesh point cloud transformed by the current pose."""
        T = self.transformation_matrix()
        plt.cla()
        x_mesh = []
        y_mesh = []
        z_mesh = []
        for point in self._load_mesh():
            px, py, pz = np.matmul(T, point)[:3]
            x_mesh.append(px)
            y_mesh.append(py)
            z_mesh.append(pz)
        self.ax.plot(x_mesh, y_mesh, z_mesh, 'b*', markersize=0.6)
        plt.xlim(-400, 400)
        plt.ylim(-400, 400)
        self.ax.set_zlim(-400, 400)
        plt.pause(0.02)
# Example usage:
# air = Aircraft()
# air.plot()
# Generated by Django 3.2 on 2021-04-23 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alters Group.description to a TextField that is
    optional (blank=True) with an empty-string default and max_length=50."""
    dependencies = [
        ('groups', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='group',
            name='description',
            field=models.TextField(blank=True, default='', max_length=50),
        ),
    ]
|
import sys
import pytest
from numpy.testing import assert_allclose
import numpy as np
from keras.backend import theano_backend as KTH
from keras.backend import tensorflow_backend as KTF
from keras.utils.np_utils import convert_kernel
def check_single_tensor_operation(function_name, input_shape, **kwargs):
    """Apply a one-tensor backend op in both Theano and TensorFlow and
    assert that shapes and values agree."""
    data = np.random.random(input_shape) - 0.5
    th_x = KTH.variable(data)
    tf_x = KTF.variable(data)
    th_z = KTH.eval(getattr(KTH, function_name)(th_x, **kwargs))
    tf_z = KTF.eval(getattr(KTF, function_name)(tf_x, **kwargs))
    assert th_z.shape == tf_z.shape
    assert_allclose(th_z, tf_z, atol=1e-05)
def check_two_tensor_operation(function_name, x_input_shape,
                               y_input_shape, **kwargs):
    """Apply a two-tensor backend op in both Theano and TensorFlow and
    assert that shapes and values agree."""
    x_data = np.random.random(x_input_shape) - 0.5
    y_data = np.random.random(y_input_shape) - 0.5
    th_x, th_y = KTH.variable(x_data), KTH.variable(y_data)
    tf_x, tf_y = KTF.variable(x_data), KTF.variable(y_data)
    th_z = KTH.eval(getattr(KTH, function_name)(th_x, th_y, **kwargs))
    tf_z = KTF.eval(getattr(KTF, function_name)(tf_x, tf_y, **kwargs))
    assert th_z.shape == tf_z.shape
    assert_allclose(th_z, tf_z, atol=1e-05)
def check_composed_tensor_operations(first_function_name, first_function_args,
                                     second_function_name, second_function_args,
                                     input_shape):
    ''' Creates a random tensor t0 with shape input_shape and compute
            t1 = first_function_name(t0, **first_function_args)
            t2 = second_function_name(t1, **second_function_args)
    with both Theano and TensorFlow backends and ensures the answers match.
    '''
    data = np.random.random(input_shape) - 0.5
    # Run the same two-op pipeline on each backend.
    th_t1 = getattr(KTH, first_function_name)(KTH.variable(data), **first_function_args)
    tf_t1 = getattr(KTF, first_function_name)(KTF.variable(data), **first_function_args)
    th_t2 = KTH.eval(getattr(KTH, second_function_name)(th_t1, **second_function_args))
    tf_t2 = KTF.eval(getattr(KTF, second_function_name)(tf_t1, **second_function_args))
    assert th_t2.shape == tf_t2.shape
    assert_allclose(th_t2, tf_t2, atol=1e-05)
class TestBackend(object):
    """Parity tests: every op must give the same result on the Theano (KTH)
    and TensorFlow (KTF) Keras backends."""
    def test_linear_operations(self):
        """dot / batch_dot / transpose agree across backends."""
        check_two_tensor_operation('dot', (4, 2), (2, 4))
        check_two_tensor_operation('dot', (4, 2), (5, 2, 3))
        check_two_tensor_operation('batch_dot', (4, 2, 3), (4, 5, 3),
                                   axes=(2, 2))
        check_single_tensor_operation('transpose', (4, 2))
    def test_shape_operations(self):
        """concatenate / reshape / permute / repeat / flatten / squeeze."""
        # concatenate
        xval = np.random.random((4, 3))
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        yval = np.random.random((4, 2))
        yth = KTH.variable(yval)
        ytf = KTF.variable(yval)
        zth = KTH.eval(KTH.concatenate([xth, yth], axis=-1))
        ztf = KTF.eval(KTF.concatenate([xtf, ytf], axis=-1))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
        check_single_tensor_operation('reshape', (4, 2), shape=(8, 1))
        check_single_tensor_operation('permute_dimensions', (4, 2, 3),
                                      pattern=(2, 0, 1))
        check_single_tensor_operation('repeat', (4, 1), n=3)
        check_single_tensor_operation('flatten', (4, 1))
        check_single_tensor_operation('expand_dims', (4, 3), dim=-1)
        check_single_tensor_operation('expand_dims', (4, 3, 2), dim=1)
        check_single_tensor_operation('squeeze', (4, 3, 1), axis=2)
        check_composed_tensor_operations('reshape', {'shape':(4,3,1,1)},
                                         'squeeze', {'axis':2},
                                         (4, 3, 1, 1))
    def test_repeat_elements(self):
        """repeat_elements matches np.repeat for 1-3D tensors on every axis."""
        reps = 3
        for ndims in [1, 2, 3]:
            shape = np.arange(2, 2+ndims)
            arr = np.arange(np.prod(shape)).reshape(shape)
            arr_th = KTH.variable(arr)
            arr_tf = KTF.variable(arr)
            for rep_axis in range(ndims):
                np_rep = np.repeat(arr, reps, axis=rep_axis)
                th_rep = KTH.eval(
                    KTH.repeat_elements(arr_th, reps, axis=rep_axis))
                tf_rep = KTF.eval(
                    KTF.repeat_elements(arr_tf, reps, axis=rep_axis))
                assert th_rep.shape == np_rep.shape
                assert tf_rep.shape == np_rep.shape
                assert_allclose(np_rep, th_rep, atol=1e-05)
                assert_allclose(np_rep, tf_rep, atol=1e-05)
    def test_tile(self):
        """tile agrees across backends."""
        shape = (3, 4)
        arr = np.arange(np.prod(shape)).reshape(shape)
        arr_th = KTH.variable(arr)
        arr_tf = KTF.variable(arr)
        n = (2, 1)
        th_rep = KTH.eval(KTH.tile(arr_th, n))
        tf_rep = KTF.eval(KTF.tile(arr_tf, n))
        assert_allclose(tf_rep, th_rep, atol=1e-05)
    def test_value_manipulation(self):
        """get_value / set_value / count_params agree across backends."""
        val = np.random.random((4, 2))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)
        # get_value
        valth = KTH.get_value(xth)
        valtf = KTF.get_value(xtf)
        assert valtf.shape == valth.shape
        assert_allclose(valth, valtf, atol=1e-05)
        # set_value
        val = np.random.random((4, 2))
        KTH.set_value(xth, val)
        KTF.set_value(xtf, val)
        valth = KTH.get_value(xth)
        valtf = KTF.get_value(xtf)
        assert valtf.shape == valth.shape
        assert_allclose(valth, valtf, atol=1e-05)
        # count_params
        assert KTH.count_params(xth) == KTF.count_params(xtf)
    def test_elementwise_operations(self):
        """Reductions, unary element-wise ops and comparisons agree."""
        check_single_tensor_operation('max', (4, 2))
        check_single_tensor_operation('max', (4, 2), axis=1, keepdims=True)
        check_single_tensor_operation('min', (4, 2))
        check_single_tensor_operation('min', (4, 2), axis=1, keepdims=True)
        check_single_tensor_operation('min', (4, 2, 3), axis=[1, -1])
        check_single_tensor_operation('mean', (4, 2))
        check_single_tensor_operation('mean', (4, 2), axis=1, keepdims=True)
        check_single_tensor_operation('mean', (4, 2, 3), axis=-1, keepdims=True)
        check_single_tensor_operation('mean', (4, 2, 3), axis=[1, -1])
        check_single_tensor_operation('std', (4, 2))
        check_single_tensor_operation('std', (4, 2), axis=1, keepdims=True)
        check_single_tensor_operation('std', (4, 2, 3), axis=[1, -1])
        check_single_tensor_operation('prod', (4, 2))
        check_single_tensor_operation('prod', (4, 2), axis=1, keepdims=True)
        check_single_tensor_operation('prod', (4, 2, 3), axis=[1, -1])
        # does not work yet, wait for bool <-> int casting in TF (coming soon)
        # check_single_tensor_operation('any', (4, 2))
        # check_single_tensor_operation('any', (4, 2), axis=1, keepdims=True)
        #
        # check_single_tensor_operation('any', (4, 2))
        # check_single_tensor_operation('any', (4, 2), axis=1, keepdims=True)
        check_single_tensor_operation('argmax', (4, 2))
        check_single_tensor_operation('argmax', (4, 2), axis=1)
        check_single_tensor_operation('argmin', (4, 2))
        check_single_tensor_operation('argmin', (4, 2), axis=1)
        check_single_tensor_operation('square', (4, 2))
        check_single_tensor_operation('abs', (4, 2))
        check_single_tensor_operation('sqrt', (4, 2))
        check_single_tensor_operation('exp', (4, 2))
        check_single_tensor_operation('log', (4, 2))
        check_single_tensor_operation('round', (4, 2))
        check_single_tensor_operation('sign', (4, 2))
        check_single_tensor_operation('pow', (4, 2), a=3)
        check_single_tensor_operation('clip', (4, 2), min_value=0.4,
                                      max_value=0.6)
        # two-tensor ops
        check_two_tensor_operation('equal', (4, 2), (4, 2))
        check_two_tensor_operation('maximum', (4, 2), (4, 2))
        check_two_tensor_operation('minimum', (4, 2), (4, 2))
    def test_gradient(self):
        """Gradients of x*exp(x) agree across backends."""
        val = np.random.random((4, 2))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)
        expth = xth * KTH.exp(xth)
        exptf = xtf * KTF.exp(xtf)
        lossth = KTH.sum(expth)
        losstf = KTF.sum(exptf)
        gradth = KTH.gradients(lossth, [expth])
        gradtf = KTF.gradients(losstf, [exptf])
        zth = KTH.eval(gradth[0])
        ztf = KTF.eval(gradtf[0])
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
    def test_function(self):
        """Compiled functions produce the same outputs and apply updates."""
        val = np.random.random((4, 2))
        input_val = np.random.random((4, 2))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)
        yth = KTH.placeholder(ndim=2)
        ytf = KTF.placeholder(ndim=2)
        exp_th = KTH.square(xth) + yth
        exp_tf = KTF.square(xtf) + ytf
        update_th = xth * 2
        update_tf = xtf * 2
        fth = KTH.function([yth], [exp_th], updates=[(xth, update_th)])
        ftf = KTF.function([ytf], [exp_tf], updates=[(xtf, update_tf)])
        function_outputs_th = fth([input_val])[0]
        function_outputs_tf = ftf([input_val])[0]
        assert function_outputs_th.shape == function_outputs_tf.shape
        assert_allclose(function_outputs_th, function_outputs_tf, atol=1e-05)
        # The update (x *= 2) must have been applied on both backends.
        new_val_th = KTH.get_value(xth)
        new_val_tf = KTF.get_value(xtf)
        assert new_val_th.shape == new_val_tf.shape
        assert_allclose(new_val_th, new_val_tf, atol=1e-05)
    def test_rnn(self):
        """rnn() parity, including unroll, go_backwards and masking."""
        # implement a simple RNN
        input_dim = 8
        output_dim = 4
        timesteps = 5
        input_val = np.random.random((32, timesteps, input_dim))
        init_state_val = np.random.random((32, output_dim))
        W_i_val = np.random.random((input_dim, output_dim))
        W_o_val = np.random.random((output_dim, output_dim))
        def rnn_step_fn(input_dim, output_dim, K):
            # Builds a step function closing over the shared weight values.
            W_i = K.variable(W_i_val)
            W_o = K.variable(W_o_val)
            def step_function(x, states):
                assert len(states) == 1
                prev_output = states[0]
                output = K.dot(x, W_i) + K.dot(prev_output, W_o)
                return output, [output]
            return step_function
        th_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTH)
        th_inputs = KTH.variable(input_val)
        th_initial_states = [KTH.variable(init_state_val)]
        last_output, outputs, new_states = KTH.rnn(th_rnn_step_fn, th_inputs,
                                                   th_initial_states,
                                                   go_backwards=False,
                                                   mask=None)
        th_last_output = KTH.eval(last_output)
        th_outputs = KTH.eval(outputs)
        assert len(new_states) == 1
        th_state = KTH.eval(new_states[0])
        tf_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTF)
        tf_inputs = KTF.variable(input_val)
        tf_initial_states = [KTF.variable(init_state_val)]
        last_output, outputs, new_states = KTF.rnn(tf_rnn_step_fn, tf_inputs,
                                                   tf_initial_states,
                                                   go_backwards=False,
                                                   mask=None)
        tf_last_output = KTF.eval(last_output)
        tf_outputs = KTF.eval(outputs)
        assert len(new_states) == 1
        tf_state = KTF.eval(new_states[0])
        assert_allclose(tf_last_output, th_last_output, atol=1e-04)
        assert_allclose(tf_outputs, th_outputs, atol=1e-04)
        assert_allclose(tf_state, th_state, atol=1e-04)
        # test unroll
        unrolled_last_output, unrolled_outputs, unrolled_new_states = KTH.rnn(
            th_rnn_step_fn, th_inputs,
            th_initial_states,
            go_backwards=False,
            mask=None,
            unroll=True,
            input_length=timesteps)
        unrolled_th_last_output = KTH.eval(unrolled_last_output)
        unrolled_th_outputs = KTH.eval(unrolled_outputs)
        assert len(unrolled_new_states) == 1
        unrolled_th_state = KTH.eval(unrolled_new_states[0])
        assert_allclose(th_last_output, unrolled_th_last_output, atol=1e-04)
        assert_allclose(th_outputs, unrolled_th_outputs, atol=1e-04)
        assert_allclose(th_state, unrolled_th_state, atol=1e-04)
        # test unroll with backwards = True
        bwd_last_output, bwd_outputs, bwd_new_states = KTH.rnn(
            th_rnn_step_fn, th_inputs,
            th_initial_states,
            go_backwards=True,
            mask=None)
        bwd_th_last_output = KTH.eval(bwd_last_output)
        bwd_th_outputs = KTH.eval(bwd_outputs)
        assert len(bwd_new_states) == 1
        bwd_th_state = KTH.eval(bwd_new_states[0])
        bwd_unrolled_last_output, bwd_unrolled_outputs, bwd_unrolled_new_states = KTH.rnn(
            th_rnn_step_fn, th_inputs,
            th_initial_states,
            go_backwards=True,
            mask=None,
            unroll=True,
            input_length=timesteps)
        bwd_unrolled_th_last_output = KTH.eval(bwd_unrolled_last_output)
        bwd_unrolled_th_outputs = KTH.eval(bwd_unrolled_outputs)
        assert len(bwd_unrolled_new_states) == 1
        bwd_unrolled_th_state = KTH.eval(bwd_unrolled_new_states[0])
        assert_allclose(bwd_th_last_output, bwd_unrolled_th_last_output, atol=1e-04)
        assert_allclose(bwd_th_outputs, bwd_unrolled_th_outputs, atol=1e-04)
        assert_allclose(bwd_th_state, bwd_unrolled_th_state, atol=1e-04)
        # test unroll with masking
        np_mask = np.random.randint(2, size=(32, timesteps))
        th_mask = KTH.variable(np_mask)
        masked_last_output, masked_outputs, masked_new_states = KTH.rnn(
            th_rnn_step_fn, th_inputs,
            th_initial_states,
            go_backwards=False,
            mask=th_mask)
        masked_th_last_output = KTH.eval(masked_last_output)
        masked_th_outputs = KTH.eval(masked_outputs)
        assert len(masked_new_states) == 1
        masked_th_state = KTH.eval(masked_new_states[0])
        unrolled_masked_last_output, unrolled_masked_outputs, unrolled_masked_new_states = KTH.rnn(
            th_rnn_step_fn, th_inputs,
            th_initial_states,
            go_backwards=False,
            mask=th_mask,
            unroll=True,
            input_length=timesteps)
        unrolled_masked_th_last_output = KTH.eval(unrolled_masked_last_output)
        unrolled_masked_th_outputs = KTH.eval(unrolled_masked_outputs)
        assert len(unrolled_masked_new_states) == 1
        unrolled_masked_th_state = KTH.eval(unrolled_masked_new_states[0])
        assert_allclose(unrolled_masked_th_last_output, masked_th_last_output, atol=1e-04)
        assert_allclose(unrolled_masked_th_outputs, masked_th_outputs, atol=1e-04)
        assert_allclose(unrolled_masked_th_state, masked_th_state, atol=1e-04)
    def test_switch(self):
        """switch() picks the same branch on both backends."""
        val = np.random.random()
        xth = KTH.variable(val)
        xth = KTH.switch(xth >= 0.5, xth * 0.1, xth * 0.2)
        xtf = KTF.variable(val)
        xtf = KTF.switch(xtf >= 0.5, xtf * 0.1, xtf * 0.2)
        zth = KTH.eval(xth)
        ztf = KTF.eval(xtf)
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
    def test_nn_operations(self):
        """Activations, dropout, crossentropies and l2_normalize agree."""
        check_single_tensor_operation('relu', (4, 2), alpha=0.1, max_value=0.5)
        check_single_tensor_operation('softmax', (4, 10))
        check_single_tensor_operation('softplus', (4, 10))
        check_single_tensor_operation('sigmoid', (4, 2))
        check_single_tensor_operation('hard_sigmoid', (4, 2))
        check_single_tensor_operation('tanh', (4, 2))
        # dropout
        val = np.random.random((100, 100))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)
        zth = KTH.eval(KTH.dropout(xth, level=0.2))
        ztf = KTF.eval(KTF.dropout(xtf, level=0.2))
        assert zth.shape == ztf.shape
        # dropout patterns are different, only check mean
        assert np.abs(zth.mean() - ztf.mean()) < 0.05
        check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=True)
        check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), from_logits=True)
        check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=False)
        check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), from_logits=False)
        check_single_tensor_operation('l2_normalize', (4, 3), axis=-1)
        check_single_tensor_operation('l2_normalize', (4, 3), axis=1)
    def test_conv2d(self):
        """conv2d parity in both 'th' and 'tf' dim orderings."""
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)
        for input_shape in [(2, 3, 4, 5), (2, 3, 5, 6)]:
            for kernel_shape in [(4, 3, 2, 2), (4, 3, 3, 4)]:
                xval = np.random.random(input_shape)
                xth = KTH.variable(xval)
                xtf = KTF.variable(xval)
                kernel_val = np.random.random(kernel_shape) - 0.5
                kernel_th = KTH.variable(convert_kernel(kernel_val))
                kernel_tf = KTF.variable(kernel_val)
                zth = KTH.eval(KTH.conv2d(xth, kernel_th))
                ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf))
                assert zth.shape == ztf.shape
                assert_allclose(zth, ztf, atol=1e-05)
        input_shape = (1, 6, 5, 3)
        kernel_shape = (3, 3, 3, 2)
        xval = np.random.random(input_shape)
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        kernel_val = np.random.random(kernel_shape) - 0.5
        kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='tf'))
        kernel_tf = KTF.variable(kernel_val)
        zth = KTH.eval(KTH.conv2d(xth, kernel_th, dim_ordering='tf'))
        ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, dim_ordering='tf'))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
    def test_conv3d(self):
        """conv3d parity in both 'th' and 'tf' dim orderings."""
        # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
        # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, input_depth)
        # TH kernel shape: (depth, input_depth, x, y, z)
        # TF kernel shape: (x, y, z, input_depth, depth)
        # test in dim_ordering = th
        for input_shape in [(2, 3, 4, 5, 4), (2, 3, 5, 4, 6)]:
            for kernel_shape in [(4, 3, 2, 2, 2), (4, 3, 3, 2, 4)]:
                xval = np.random.random(input_shape)
                xth = KTH.variable(xval)
                xtf = KTF.variable(xval)
                kernel_val = np.random.random(kernel_shape) - 0.5
                kernel_th = KTH.variable(convert_kernel(kernel_val))
                kernel_tf = KTF.variable(kernel_val)
                zth = KTH.eval(KTH.conv3d(xth, kernel_th))
                ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf))
                assert zth.shape == ztf.shape
                assert_allclose(zth, ztf, atol=1e-05)
        # test in dim_ordering = tf
        input_shape = (1, 2, 2, 2, 1)
        kernel_shape = (2, 2, 2, 1, 1)
        xval = np.random.random(input_shape)
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        kernel_val = np.random.random(kernel_shape) - 0.5
        kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='tf'))
        kernel_tf = KTF.variable(kernel_val)
        zth = KTH.eval(KTH.conv3d(xth, kernel_th, dim_ordering='tf'))
        ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf, dim_ordering='tf'))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
    def test_pool2d(self):
        """pool2d parity for even/odd spatial sizes and pool shapes."""
        check_single_tensor_operation('pool2d', (5, 3, 10, 12), pool_size=(2, 2),
                                      strides=(1, 1), border_mode='valid')
        check_single_tensor_operation('pool2d', (5, 3, 9, 11), pool_size=(2, 2),
                                      strides=(1, 1), border_mode='valid')
        check_single_tensor_operation('pool2d', (5, 3, 9, 11), pool_size=(2, 3),
                                      strides=(1, 1), border_mode='valid')
    def test_pool3d(self):
        """pool3d parity for even/odd spatial sizes and pool shapes."""
        check_single_tensor_operation('pool3d', (5, 3, 10, 12, 5), pool_size=(2, 2, 2),
                                      strides=(1, 1, 1), border_mode='valid')
        check_single_tensor_operation('pool3d', (5, 3, 9, 11, 5), pool_size=(2, 2, 2),
                                      strides=(1, 1, 1), border_mode='valid')
        check_single_tensor_operation('pool3d', (5, 3, 9, 11, 5), pool_size=(2, 3, 2),
                                      strides=(1, 1, 1), border_mode='valid')
    def test_random_normal(self):
        """random_normal has the requested mean/std on both backends."""
        mean = 0.
        std = 1.
        rand = KTF.eval(KTF.random_normal((1000, 1000), mean=mean, std=std))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand) - mean) < 0.01)
        assert(np.abs(np.std(rand) - std) < 0.01)
        rand = KTH.eval(KTH.random_normal((1000, 1000), mean=mean, std=std))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand) - mean) < 0.01)
        assert(np.abs(np.std(rand) - std) < 0.01)
    def test_random_uniform(self):
        """random_uniform stays within bounds with ~zero mean on both backends."""
        min = -1.
        max = 1.
        rand = KTF.eval(KTF.random_uniform((1000, 1000), min, max))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand)) < 0.01)
        assert(np.max(rand) <= max)
        assert(np.min(rand) >= min)
        rand = KTH.eval(KTH.random_uniform((1000, 1000), min, max))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand)) < 0.01)
        assert(np.max(rand) <= max)
        assert(np.min(rand) >= min)
    def test_random_binomial(self):
        """random_binomial draws 0/1 with probability p on both backends."""
        p = 0.5
        rand = KTF.eval(KTF.random_binomial((1000, 1000), p))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand) - p) < 0.01)
        assert(np.max(rand) == 1)
        assert(np.min(rand) == 0)
        rand = KTH.eval(KTH.random_binomial((1000, 1000), p))
        assert(rand.shape == (1000, 1000))
        assert(np.abs(np.mean(rand) - p) < 0.01)
        assert(np.max(rand) == 1)
        assert(np.min(rand) == 0)
if __name__ == '__main__':
    # Allow running this test module directly; delegates to pytest's CLI.
    pytest.main([__file__])
|
#! /usr/bin/env python3
def func1():
    """Print ints parsed from a fixed list; report the first parse failure.

    map() is lazy, so '1'..'3' are printed before '4L' raises ValueError,
    which is then printed instead of propagating.
    """
    values = ['1', '2', '3', '4L']
    try:
        for number in map(int, values):
            print(number)
    except ValueError as err:
        print(err)
if __name__=='__main__':
    # Demo entry point: label the run, then exercise func1().
    print("\nfunc1()")
    func1()
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test dataset module"""
import pytest
from easydict import EasyDict as edict
from mindelec.data import Dataset
from mindelec.geometry import create_config_from_edict
from mindelec.geometry import Disk, Rectangle, TimeDomain, GeometryWithTime
from mindelec.data import BoundaryBC, BoundaryIC
from config import ds_config, src_sampling_config, no_src_sampling_config, bc_sampling_config
# Sampling configuration used by the IC/BC test geometry:
# - domain: deterministic 10x20 grid sampling
# - BC: 10 random boundary samples, with outward normals attached
# - IC: 10 random initial-condition samples
# - time: 10 deterministic samples over the time axis
ic_bc_config = edict({
    'domain': edict({
        'random_sampling': False,
        'size': [10, 20],
    }),
    'BC': edict({
        'random_sampling': True,
        'size': 10,
        'with_normal': True,
    }),
    'IC': edict({
        'random_sampling': True,
        'size': 10,
    }),
    'time': edict({
        'random_sampling': False,
        'size': 10,
    })
})
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_dataset_allnone():
    """Constructing Dataset with no data source must raise ValueError."""
    with pytest.raises(ValueError):
        Dataset()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_dataset():
    """test dataset

    Exercises Dataset validation paths:
    - constraint-type assignment on a dataset built from a dataset_list
    - type/key validation of the geometry dict passed to Dataset()
    - operations expected to raise before create_dataset() materializes data
    """
    disk = Disk("src", (0.0, 0.0), 0.2)
    rectangle = Rectangle("rect", (-1, -1), (1, 1))
    diff = rectangle - disk
    time = TimeDomain("time", 0.0, 1.0)
    # check datalist
    rect_with_time = GeometryWithTime(rectangle, time)
    rect_with_time.set_sampling_config(create_config_from_edict(ic_bc_config))
    bc = BoundaryBC(rect_with_time)
    ic = BoundaryIC(rect_with_time)
    dataset = Dataset(dataset_list=bc)
    dataset.set_constraint_type("Equation")
    # Expected to be rejected with ValueError — presumably because `ic` is
    # not part of this dataset; confirm against Dataset.set_constraint_type.
    c_type1 = {bc: "Equation", ic: "Equation"}
    with pytest.raises(ValueError):
        dataset.set_constraint_type(c_type1)
    # Build source / no-source / boundary regions for the geometry dict.
    no_src_region = GeometryWithTime(diff, time)
    no_src_region.set_name("no_src")
    no_src_region.set_sampling_config(create_config_from_edict(no_src_sampling_config))
    src_region = GeometryWithTime(disk, time)
    src_region.set_name("src")
    src_region.set_sampling_config(create_config_from_edict(src_sampling_config))
    boundary = GeometryWithTime(rectangle, time)
    boundary.set_name("bc")
    boundary.set_sampling_config(create_config_from_edict(bc_sampling_config))
    # A list instead of a dict must raise TypeError.
    geom_dict = ['1', '2']
    with pytest.raises(TypeError):
        Dataset(geom_dict)
    # An unknown column name ("test") must raise KeyError.
    geom_dict = {src_region: ["test"]}
    with pytest.raises(KeyError):
        Dataset(geom_dict)
    geom_dict = {src_region: ["domain", "IC"],
                 no_src_region: ["domain", "IC"],
                 boundary: ["BC"]}
    dataset = Dataset(geom_dict)
    # Before/without valid dataset creation, these accessors must raise.
    with pytest.raises(ValueError):
        print(dataset[0])
    with pytest.raises(ValueError):
        len(dataset)
    with pytest.raises(ValueError):
        dataset.get_columns_list()
    with pytest.raises(ValueError):
        dataset.create_dataset(batch_size=ds_config.train.batch_size,
                               shuffle=ds_config.train.shuffle,
                               prebatched_data=True,
                               drop_remainder=False)
|
"""
Date: 2019/9/23
Author: Dafa
Purpose: practice exercise for module import styles
"""
# Alternative import styles kept for reference:
# import c2_some_module
# result = c2_some_module.f(5)
# pi = c2_some_module.PI
# from c2_some_module import f , g , PI
# result = g (5 , PI )
import c2_some_module as csm
from c2_some_module import PI as pi , g as gf
# Exercise both the aliased module and the individually aliased names.
r1 = csm.f(pi)
r2 = gf(6 , pi)
# -*-coding:utf-8 -*-
__author__ = 'Administrator'
from PyQt5 import QtCore, QtGui, QtWidgets
from quote_api import *
'''
银行股息率计算
接收主页面的更新股息率的信号
从quote_api接口获取实时行情数据
emit获取的实时行情数据到主页面,主页面更新银行信息表
'''
class BankThread(QtCore.QThread):
    """Worker thread computing bank dividend yields and A/H share premiums.

    Receives refresh signals from the main window, pulls live quotes via the
    quote_api helpers (tdx / tushare / futu), and emits result DataFrames
    back so the main window can update its bank tables.
    """
    # Emitted with the dividend-yield table.
    df_bank_out = QtCore.pyqtSignal(pd.DataFrame)
    # Emitted with the A/H premium comparison table.
    signal_df_ah_premium = QtCore.pyqtSignal(pd.DataFrame)
    def __init__(self, parent=None):
        super(BankThread, self).__init__(parent)
        self.is_running = True
        # Path of the bank basic-info CSV; the no-op string below documents
        # its layout (ts_code, name).
        '''
        银行基本信息目录及文件名
        ts_code name
        000001.SZ 平安银行
        002142.SZ 宁波银行
        002807.SZ 江阴银行
        '''
        self.bank_basic_info_dir = r"C:\quanttime\src\watch_time\bank_info.csv"
        # Path of the A-share <-> HK-share mapping CSV; layout documented below.
        '''
        a_code name hk_code
        002936.SZ 郑州银行 HK.6196
        600016.SH 民生银行 HK.1988
        600036.SH 招商银行 HK.3968
        '''
        self.bank_AH_dir = r'C:\quanttime\src\watch_time\bank_A_H.csv'
        # tushare connect context
        # NOTE(review): API token hard-coded in source — move to config/env.
        token = "17e7755e254f02cc312b8b7e22ded9a308924147f8546fdfbe653ba1"
        ts.set_token(token)
        # authorize the tushare session
        self.pro = ts.pro_api()
    # ================================================
    def update_bank_industry_table(self):
        '''
        Slot: refresh the bank-industry info table.

        Triggered by the main window's "update bank info" pushbutton.
        Reads the all-stocks basic info produced by tushare and keeps only
        rows whose industry equals "银行" (bank). all_stock_info_ts.csv is
        maintained by a separate job; this method only reads it.
        1. Read all_stock_info_ts.csv from C:/quanttime/data/basic_info.
        2. Write the bank rows to bank_info.csv in the run directory
           (C:/quanttime/src/watch_time).
        :return: None
        '''
        select_columns = ["ts_code", "name", "industry"]
        bank = pd.read_csv(r"C:\quanttime\data\basic_info\all_stock_info_ts.csv",
                           usecols=select_columns, encoding="gbk", index_col=["ts_code"])
        bank = bank[bank["industry"] == "银行"]
        bank[["name"]].to_csv(self.bank_basic_info_dir, encoding="gbk")
    # ==============================================================
    def process_bank_dividend(self):
        '''
        Slot: compute dividend yields for the tracked banks.

        Triggered by the main window's "process dividends" pushbutton.
        Emits the resulting table through df_bank_out.
        :return: None
        '''
        bank = pd.read_csv(self.bank_basic_info_dir, encoding="gbk", index_col=["ts_code"])
        # Fetch the latest dividend record per bank.
        df_bank_dividend = self.get_dividend_by_tushare(bank.index.tolist())
        df_bank_dividend = df_bank_dividend.set_index("ts_code")
        df_bank_dividend = pd.merge(df_bank_dividend, bank, left_index=True, right_index=True)
        # Fetch live prices, trying tdx first.
        df_bank_price = get_quote_by_tdx(bank.index.tolist())
        if df_bank_price.empty:
            print("tdx获取实时行情失败,从tushare获取")
            df_bank_price = get_quote_by_ts(bank.index.tolist())
        # Give up when tushare did not return quotes either.
        if df_bank_price.empty:
            print("从tushare获取也失败")
            return
        df_bank_price['code'] = df_bank_price['code'].apply(self.code_add_market)
        df_bank_price = df_bank_price.set_index("code")
        df_bank = pd.merge(df_bank_dividend, df_bank_price, left_index=True, right_index=True)
        columns_need_2_float = ["cash_div_tax", "price"]
        df_bank[columns_need_2_float] = df_bank[columns_need_2_float].apply(pd.to_numeric)
        # Yield = pre-tax cash dividend per share / latest price.
        df_bank["div_rate"] = df_bank["cash_div_tax"] / df_bank["price"]
        df_bank = df_bank.sort_values(by=["div_rate"], ascending=False)
        df_bank["div_rate"] = df_bank["div_rate"].map(self.display_percent_format)
        print(df_bank)
        self.df_bank_out.emit(df_bank)
    # ==============================================================
    def get_dividend_by_tushare(self, ts_code_list):
        '''
        Fetch dividend records through the tushare API.
        :param ts_code_list: list of codes in tushare format (e.g. 000001.SZ)
        :return: DataFrame with one latest-year dividend row per code
        '''
        # end_date: dividend fiscal year; cash_div_tax: pre-tax cash per share;
        # record_date: record date; pay_date: payment date.
        columns_name = ["ts_code", "end_date", "cash_div_tax", "record_date", "pay_date"]
        get_feilds = 'ts_code,end_date,cash_div_tax,record_date,pay_date'
        df_bank_dividend = pd.DataFrame(columns=columns_name)
        curr_year = datetime.today().year
        # Dividends are keyed to the end of the previous fiscal year.
        end_dividend_date = datetime(curr_year-1, 12, 31).date().strftime("%Y%m%d")
        # df_bank_dividend = self.pro.dividend(ts_code=ts_code_list.pop(0), fields=get_feilds)
        for ts_code in ts_code_list:
            df_tmp = self.pro.dividend(ts_code=ts_code, fields=get_feilds)
            if df_tmp.empty:
                continue
            # Keep only the latest record; older dividend rows are not needed.
            # df_tmp = df_tmp.loc[df_tmp.index[0], columns_name]
            df_tmp = df_tmp[df_tmp['end_date'] == end_dividend_date]
            # NOTE(review): iloc[0] raises IndexError if the filter leaves no
            # rows for this code; also DataFrame.append is deprecated in
            # modern pandas — confirm the pinned pandas version.
            df_tmp = df_tmp.iloc[0, :]
            df_bank_dividend = df_bank_dividend.append(df_tmp, ignore_index=True)
        return df_bank_dividend
    # ===================================================
    def get_AH_premium(self):
        '''
        Compute the A-share vs H-share comparison table.

        A and HK codes come from bank_A_H.csv in the run directory; quotes
        come from futu. Emits the merged table (or an empty DataFrame on
        failure) through signal_df_ah_premium.
        :return: None
        '''
        ah = pd.read_csv(self.bank_AH_dir, encoding="gbk")
        df_a_price = get_quote_by_futu(ah['a_code'].tolist())
        df_hk_price = get_quote_by_futu(ah['hk_code'].tolist())
        if not df_a_price.empty and not df_hk_price.empty:
            # Join A-share quotes, then HK quotes (column suffixes _a/_h).
            ah = pd.merge(ah, df_a_price, left_on='a_code', right_on='code')
            ah = ah.drop(columns=['code'])
            ah = pd.merge(ah, df_hk_price, left_on='hk_code', right_on='code', suffixes=['_a', '_h'])
            ah = ah.drop(columns=['code'])
            self.signal_df_ah_premium.emit(ah)
        else:
            # Signal failure with an empty frame.
            self.signal_df_ah_premium.emit(pd.DataFrame())
    # ========================================================
    @staticmethod
    def code_add_market(x):
        '''
        Append the market suffix to a bare 6-digit code:
        6xxxxx -> .SH; 0xxxxx / 3xxxxx -> .SZ; anything else -> '000000'.
        :param x: stock code (str or int)
        :return: suffixed code string
        '''
        x = str(x)
        if x[0] == '6':
            return x + '.SH'
        elif x[0] == '3':
            return x + '.SZ'
        elif x[0] == '0':
            return x + '.SZ'
        else:
            return '000000'
    # =========================================
    @staticmethod
    def display_percent_format(x):
        '''
        Format a fraction as a percentage string with two decimals.
        Returns 0 when the input is not numeric.
        '''
        try:
            data = float(x)
        except ValueError:
            print("input is not numberic")
            return 0
        return "%.2f%%" % (data * 100)
# ==============
if __name__ == "__main__":
    # Manual smoke test: fetch the latest dividend row for 000001.SZ.
    theBank = BankThread()
    df = theBank.get_dividend_by_tushare(["000001.SZ"])
    print(df)
|
from flask import Flask, request, redirect, render_template
app = Flask(__name__)
app.config['DEBUG'] = True
@app.route("/welcome")
def welcome_new():
    """Greet the user named in the ?username= query parameter."""
    welcome_user = request.args.get("username")
    return render_template("welcome.html", username=welcome_user)
@app.route("/", methods=["POST", "GET"])
def index():
    """Signup form: validate username/password/email, then redirect to /welcome.

    GET renders the empty form; POST validates and either redirects to the
    welcome page or re-renders the form with per-field error messages.
    """
    from urllib.parse import quote_plus  # local import: only used on success

    if request.method == "GET":
        return render_template("index.html")
    # initialize empty errors
    username_error_msg = ""
    password_error_msg = ""
    verify_password_error_msg = ""
    email_error_msg = ""
    username = request.form["username"]
    user_password = request.form["user_password"]
    password_confirm = request.form["password_confirm"]
    user_email = request.form["user_email"]
    # Username checks. Fix: the original used independent `if`s, so an empty
    # username's "Please enter a username." was overwritten by the length
    # message; `elif` keeps the most specific message.
    if username == "":
        username_error_msg = "Please enter a username."
    elif len(username) < 3 or len(username) > 20:
        username_error_msg = "Username must be 3-20 characters."
    if " " in username:
        username_error_msg = "Username cannot contain a space."
    if user_password == "":
        password_error_msg = "Please enter a password."
    if password_confirm == "":
        verify_password_error_msg = "Please confirm your password."
    if user_password != password_confirm:
        password_error_msg = "Passwords do not match."
        verify_password_error_msg = "Passwords do not match."
    # Email must contain "@" and "." and no spaces.
    if "@" not in user_email or "." not in user_email or " " in user_email:
        email_error_msg = "Please enter a valid email address."
    if not (username_error_msg or password_error_msg
            or verify_password_error_msg or email_error_msg):
        # Quote the username so reserved URL characters survive the
        # round-trip through the query string.
        return redirect("/welcome?username=" + quote_plus(username))
    return render_template("index.html",
                           username_error=username_error_msg,
                           password_error=password_error_msg,
                           verify_password_error=verify_password_error_msg,
                           email_error=email_error_msg
                           )
# NOTE(review): app.run() executes at import time; Flask apps normally guard
# this with `if __name__ == "__main__":` — confirm before reusing as a module.
app.run()
"""
@Author: yanzx
@Date: 2021/4/7 22:50
@Description:
"""
from rest_framework.authentication import BaseAuthentication, TokenAuthentication
from rest_framework.exceptions import AuthenticationFailed
from rest_framework_jwt.serializers import VerifyJSONWebTokenSerializer
class TokenAuth():
    """Custom DRF-style authentication: validates a JWT taken from the
    HTTP ``TOKEN`` request header via VerifyJSONWebTokenSerializer."""
    def authenticate(self, request):
        # Collect the raw token from the request headers.
        token={"token":None}
        # print(request.META.get("HTTP_TOKEN"))
        token["token"] = request.META.get('HTTP_TOKEN')
        print(token)
        # The serializer itself raises on a malformed/expired token.
        valid_data = VerifyJSONWebTokenSerializer().validate(token)
        user = valid_data['user']
        print(user)
        if user:
            # NOTE(review): DRF expects authenticate() to return a
            # (user, auth) tuple on success; this bare `return` yields None
            # ("not attempted"), leaving request.user anonymous — confirm
            # whether `return (user, token)` was intended.
            return
        else:
            raise AuthenticationFailed('认证失败')
|
from fastapi import APIRouter, Depends, HTTPException
from fastapi.security import APIKeyHeader
from api import prediction
from api import healthcheck
# Top-level router mounted by the application; sub-routers attached below.
api_router = APIRouter()
# NOTE(review): `router` appears unused in this module — confirm before removing.
router = APIRouter()
# Clients must send the API key in the `x-api-key` request header.
API_KEY_SCHEME = APIKeyHeader(name='x-api-key')
async def verify_api_key(api_key: str = Depends(API_KEY_SCHEME)) -> str:
    """FastAPI dependency rejecting requests with a wrong x-api-key header.

    SECURITY: the expected key can now be supplied via the ``API_KEY``
    environment variable; the previously hard-coded value is kept only as a
    backward-compatible default and should be rotated out of source control.

    :raises HTTPException: 400 when the header does not match.
    :return: the validated api key string.
    """
    import os  # local import keeps the module's import block untouched
    expected = os.environ.get("API_KEY", "dd74decc-8825-4a49-b9bc-e4608249d612")
    if api_key != expected:
        raise HTTPException(status_code=400, detail="x-api-key header invalid")
    return api_key
# Mount the prediction endpoints under /prediction.
# NOTE(review): the API-key dependency is commented out, so these routes are
# currently unauthenticated — confirm this is intentional.
api_router.include_router(
    prediction.router,
    prefix="/prediction",
    tags=["prediction"],
    # dependencies=[Depends(verify_api_key)],
)
# Health-check endpoints stay at the root prefix.
api_router.include_router(
    healthcheck.router,
    tags=["healthcheck"],
)
|
'''
The Daily Weather app obtains the most recent weather data from weather.gov and emails it to the specified recipients at 9am every day.
The default station is set to Central Park, NYC.
Please ensure that you have entered your email address and password in the send_html_file module.
Note: usage requires installation of the schedule package ($ pip install schedule)
Created on 10 Jan 2018
@author: Sameer Tulshyan
'''
from Get_Weather_Data import get_weather_data
from Create_Html_file import create_html_report
from Send_Html_file import send_gmail
from collections import OrderedDict
from time import sleep
from pprint import pprint
import schedule
def job():
    """Function that will be called based on the scheduled frequency"""
    pprint(schedule.jobs)
    # Default station is KNYC (Central Park); any valid station code works.
    raw_weather, icon = get_weather_data('KNYC')
    # The HTML template relies on key order, so sort into an OrderedDict.
    ordered_weather = OrderedDict(sorted(raw_weather.items()))
    report_path = "Email_File.html"
    create_html_report(ordered_weather, icon, report_path)
    send_gmail(report_path)
# Register the daily 9:00 am run (24-hour clock).
# Fix: the schedule API requires calling every() — `schedule.every.day`
# accessed `.day` on the function object itself and raised AttributeError,
# so the job was never registered.
schedule.every().day.at("9:00").do(job)
# The schedule package needs a live polling loop to fire pending jobs.
while True:
    schedule.run_pending()
    sleep(1)
|
#! /usr/bin/env python
#This code is a python implementation of the atom counts features used in
#Ballester PJ, Mitchell JB. A machine learning approach to predicting protein-ligand binding affinity with applications to molecular docking. Bioinformatics. 2010; 26:1169-75.
#Inspiration for the CartesianPoint,Atom and Loader classes gotten from Durrant, J. D. and J. A. McCammon (2011). "BINANA: A novel algorithm for ligand-binding
# characterization." J Mol Graph Model 29(6): 888-893.
import numpy as np
#import h5py
import fnmatch
#from __future__ import print_function
import math
import os
import sys
import textwrap
class CartesianPoint:
    """A 3-D point; 88888.0 is this project's 'unset' sentinel coordinate."""
    x=88888.0
    y=88888.0
    z=88888.0
    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z
    def distance_to(self, a_point):
        """Euclidean distance from this point to a_point."""
        dx = self.x - a_point.x
        dy = self.y - a_point.y
        dz = self.z - a_point.z
        return math.sqrt(dx * dx + dy * dy + dz * dz)
class Atom:
    """One ATOM/HETATM record parsed from a fixed-column PDB line."""

    # Two-letter element symbols recognised when guessing from the atom name.
    _TWO_LETTER_ELEMS = ('BR', 'AL', 'CL', 'BI', 'AS', 'AG',
                         'LI', 'MG', 'MN', 'RH', 'ZN', 'FE')
    # Element symbol -> atomic number (SD/SG are sulfur aliases; elements
    # absent from this table leave atom_no at its prior value, as before).
    _ATOMIC_NUMBERS = {'C': 6, 'N': 7, 'O': 8, 'F': 9, 'P': 15, 'S': 16,
                       'SD': 16, 'SG': 16, 'CL': 17, 'BR': 35, 'I': 53}

    def __init__(self):
        self.record_name = ""
        self.atom_name = ""
        self.residue_name = ""
        self.coordinates = CartesianPoint(88888.0, 88888.0, 88888.0)
        self.elem_sym = ""
        self.pdb_index = ""
        self.atom_no = 0
        self.res_id = 0
        self.chain_id = ""

    def read_pdb_line(self, line):
        """Populate this atom from one fixed-width PDB ATOM/HETATM line."""
        self.line = line
        self.atom_name = line[12:16].strip()
        self.coordinates = CartesianPoint(float(line[30:38]), float(line[38:46]), float(line[46:54]))
        if self.elem_sym == "":
            # Guess the element symbol from the atom-name column.
            first_two_letters = self.atom_name[0:2].strip().upper()
            if first_two_letters in self._TWO_LETTER_ELEMS:
                self.elem_sym = first_two_letters
            else:
                # Single-letter element: strip digits and '@' from the name,
                # then keep the first remaining character (replaces the
                # original chain of eleven .replace() calls).
                cleaned = ''.join(ch for ch in self.atom_name if ch not in '0123456789@')
                self.elem_sym = cleaned[0:1].strip().upper()
        elem_type = self.elem_sym[0:2].strip().upper()
        # Table lookup replaces the original elif chain; unknown symbols
        # leave atom_no unchanged, exactly as the chain did.
        self.atom_no = self._ATOMIC_NUMBERS.get(elem_type, self.atom_no)
        self.pdb_index = line[6:11].strip()
        # Rightmost three characters only: strips unique rotamer prefixes.
        self.residue_name = " " + line[17:20][-3:]
        try:
            self.res_id = int(line[22:26])
        except ValueError:
            # pdbqt files may carry no resid entry; narrowed from a bare
            # `except:` which would also have hidden unrelated failures.
            pass
        self.chain_id = line[21]
        if self.residue_name.strip() == "":
            self.residue_name = " MOL"
        self.record_name = line[0:6]
class Loader:
    # Parses a PDB file into Atom objects, tracking the coordinate bounding
    # box and keeping non-protein (HET/ligand) atoms in a separate map.
    # (Python 2 module: print statements below are intentional.)
    def __init__ (self):
        self.all_atoms = {}  # auto-index -> Atom (protein + ligand)
        self.non_protein_atoms = {}  # auto-index -> Atom (non-protein only)
        self.ligand_com = CartesianPoint(88888.0,88888.0,88888.0)
        # Running bounding box of accepted atoms (sentinel-initialised).
        self.max_x = -8888.88
        self.min_x = 8888.88
        self.max_y = -8888.88
        self.min_y = 8888.88
        self.max_z = -8888.88
        self.min_z = 8888.88
        # Standard residue names (plus protonation variants) treated as protein.
        self.protein_resnames = ["ALA", "ARG", "ASN", "ASP", "ASH", "ASX", "CYS", "CYM", "CYX", "GLN", "GLU", "GLH", "GLX", "GLY", "HIS", "HID", "HIE", "HIP", "ILE", "LEU", "LYS", "LYN", "MET", "PHE", "PRO", "SER", "THR", "TRP", "TYR", "VAL"]
    def PDBLoad(self, file_name, min_x=-8888.88, max_x=8888.88, min_y=-8888.88, max_y=8888.88, min_z=-8888.88, max_z=8888.88):
        # Load ATOM/HETATM records from file_name, keeping only atoms whose
        # coordinates lie inside the optional min/max box. Stops at END/ENDMDL.
        auto_index = 1
        self.__init__()  # reset all state so the loader can be reused
        # Now load the file into a list
        file = open(file_name,"r")
        lines = file.readlines()
        file.close()
        atom_already_loaded = [] # going to keep track of atomname_resid_chain pairs, to make sure redundants aren't loaded. This basically
                                 # gets rid of rotomers, I think.
        print file_name
        for t in range(0,len(lines)):
            #print "OK"
            line=lines[t]
            if line[:3] == "END":
                # Multi-model files are not supported; warn and stop reading.
                #print "OK"
                t = textwrap.wrap("WARNING: END or ENDMDL encountered in " + file_name + ". Everyline after this will be ignored. If your PDB file has multiple pose or is an ensemble structure, split them up into individual pose or model", 80)
                print "\n".join(t) + "\n"
                print line
                break
            if len(line) >= 7:
                #print "OK"
                if line[0:4]=="ATOM" or line[0:6]=="HETATM": # Load atom data (coordinates, etc.)
                    temp_atom = Atom()
                    temp_atom.read_pdb_line(line)
                    #print "OK"
                    if temp_atom.coordinates.x > min_x and temp_atom.coordinates.x < max_x and temp_atom.coordinates.y > min_y and temp_atom.coordinates.y < max_y and temp_atom.coordinates.z > min_z and temp_atom.coordinates.z < max_z:
                        # Grow the running bounding box.
                        #print "OK"
                        if self.max_x < temp_atom.coordinates.x: self.max_x = temp_atom.coordinates.x
                        if self.max_y < temp_atom.coordinates.y: self.max_y = temp_atom.coordinates.y
                        if self.max_z < temp_atom.coordinates.z: self.max_z = temp_atom.coordinates.z
                        if self.min_x > temp_atom.coordinates.x: self.min_x = temp_atom.coordinates.x
                        if self.min_y > temp_atom.coordinates.y: self.min_y = temp_atom.coordinates.y
                        if self.min_z > temp_atom.coordinates.z: self.min_z = temp_atom.coordinates.z
                        key = temp_atom.atom_name.strip() + "_" + str(temp_atom.res_id) + "_" + temp_atom.residue_name.strip() + "_" + temp_atom.chain_id.strip() # this string unique identifies each atom
                        if not key in atom_already_loaded or not temp_atom.residue_name.strip() in self.protein_resnames: # so either the atom hasn't been loaded, or else it's a non-protein atom
                            # so note that non-protein atoms can have redundant names, but protein atoms cannot.
                            # This is because protein residues often contain rotamers
                            atom_already_loaded.append(key) # so each atom can only be loaded once. No rotamers.
                            self.all_atoms[auto_index] = temp_atom # So you're actually reindexing everything here.
                            if not temp_atom.residue_name[-3:] in self.protein_resnames: self.non_protein_atoms[auto_index] = temp_atom;#print "OK"
                            auto_index = auto_index + 1
class AtomCountFeaturizer:
def __init__ (self,complex_pdb):
self.complex_pdb = complex_pdb
self.Complex_PDB = Loader()
self.Complex_PDB.PDBLoad(complex_pdb)
def calc_feature(self):
feature_dict = {
'6.6' : 0,
'7.6' :0,
'8.6' :0,
'16.6' :0,
'6.7' :0,
'7.7' :0,
'8.7' :0,
'16.7' :0,
'6.8' :0,
'7.8' :0,
'8.8' :0,
'16.8' :0,
'6.9' :0,
'7.9' :0,
'8.9' :0,
'16.9' : 0,
'6.15' :0,
'7.15' :0,
'8.15' :0,
'16.15' :0,
'6.16' :0,
'7.16' :0,
'8.16' :0,
'16.16' :0,
'6.17' :0,
'7.17' :0,
'8.17' :0,
'16.17' :0,
'6.35' :0,
'7.35' :0,
'8.35' :0,
'16.35' :0,
'6.53' :0,
'7.53' :0,
'8.53' :0,
'16.53' :0
}
feature_list = ['6.6','7.6','8.6','16.6','6.7','7.7','8.7','16.7','6.8','7.8','8.8','16.8',
'6.9','7.9','8.9','16.9','6.15','7.15','8.15','16.15','6.16','7.16','8.16',
'16.16','6.17','7.17','8.17','16.17','6.35','7.35','8.35','16.35','6.53','7.53','8.53','16.53']
elem_interest = [6,7,8,9,15,16,17,35,53]
for jatom in self.Complex_PDB.non_protein_atoms:
for iatom in self.Complex_PDB.all_atoms:
if self.Complex_PDB.all_atoms[iatom].res_id != self.Complex_PDB.non_protein_atoms[jatom].res_id:
dist = 0.0
dist = self.Complex_PDB.non_protein_atoms[jatom].coordinates.distance_to(self.Complex_PDB.all_atoms[iatom].coordinates)
#12 Arngstrom distance
prot_atom_no = 0
lig_atom_no = 0
lig_atom_no = self.Complex_PDB.non_protein_atoms[jatom].atom_no
prot_atom_no = self.Complex_PDB.all_atoms[iatom].atom_no
if dist < 12:
if (lig_atom_no in elem_interest) and (prot_atom_no in elem_interest):
#key_list = sorted([lig_atom_no,prot_atom_no])
key_list = [lig_atom_no,prot_atom_no]
key = str(key_list[1])+'.'+str(key_list[0])
feature_dict[key] = feature_dict.get(key, 0) + 1
for feat in feature_list:
print feat,
print 'PDB'
for feat in feature_list:
print str(feature_dict[feat]),
print self.complex_pdb
return feature_dict
def exec_func(File):
    # Featurize one PDB file; returns its atom-count feature dict.
    return AtomCountFeaturizer(File).calc_feature()
# NOTE(review): DIRIN is unused in this script — presumably a leftover
# cluster path; confirm before removing.
DIRIN='/fccc/users/karanicolaslab/adeshiy'
# The complex PDB path is taken from the command line.
INFILE=sys.argv[1]
exec_func(INFILE)
|
import unittest
from dxpy.task import configs
from dxpy.task.exceptions import UnknownConfigName
# TODO: add unittests
class TestConfigs(unittest.TestCase):
    """Unit tests for the dxpy.task.configs registry helpers."""

    def setUp(self):
        # Register a throwaway config class under a test-only name so the
        # real registry entries are never touched.
        self.config_name = 'config_unittest'

        class ConfigUnitTest:
            def __init__(self):
                self.field1 = 'field1'

        configs.CONFIGS[self.config_name] = None
        configs.CONFIGS_CLS[self.config_name] = ConfigUnitTest
        # (dead trailing `pass` removed)

    def tearDown(self):
        # Drop the test entry so tests stay independent of each other.
        configs.CONFIGS.pop(self.config_name)
        configs.CONFIGS_CLS.pop(self.config_name)

    def test_get_config(self):
        # get_config instantiates the registered class on first access.
        self.assertEqual(configs.get_config(self.config_name).field1, 'field1')

    def test_unknown_config_name(self):
        with self.assertRaises(UnknownConfigName):
            configs.get_config('some_invalid_config_name')

    def test_set_config_by_name_key(self):
        configs.set_config_by_name_key(self.config_name, 'field1', 'test_set')
        self.assertEqual(configs.get_config(self.config_name).field1, 'test_set')
        configs.clear_config(self.config_name)

    def test_clear_configs(self):
        configs.set_config_by_name_key(self.config_name, 'field1', 'modified')
        self.assertEqual(configs.get_config(self.config_name).field1, 'modified')
        # clear_config must restore the class defaults.
        configs.clear_config(self.config_name)
        self.assertEqual(configs.get_config(self.config_name).field1, 'field1')
|
# coding: utf-8
"""
HCE project, Python bindings, Distributed Tasks Manager application.
PostProcessingModuleClass is a base class for postprocess modules.
@package: dc_postprocessor
@file PostProcessingModuleClass.py
@author Alexander Vybornyh <alexander.hce.cluster@gmail.com>
@link: http://hierarchical-cluster-engine.com/
@copyright: Copyright © 2013-2017 IOIX Ukraine
@license: http://hierarchical-cluster-engine.com/license/
@since: 0.1
"""
# This object is a run at once module processing
class PostProcessingModuleClass(object):
    """Base class for run-at-once post-processing modules.

    Subclasses override init()/processBatch()/processBatchItem(); the
    defaults here are no-op pass-throughs.
    """

    def __init__(self, getConfigOption=None, log=None):
        # Config accessor callable and logger are injected by the host.
        self.getConfigOption = getConfigOption
        self.logger = log

    def init(self):
        """Initialization hook; default implementation does nothing."""
        pass

    def processBatch(self, batchObj):
        """Process a whole batch; default returns it unchanged."""
        return batchObj

    def processBatchItem(self, batchItemObj):
        """Process a single batch item; default returns it unchanged."""
        return batchItemObj
|
from dcmodule import load_with_args, result_dump
if __name__ == "__main__":
    # load_with_args yields an (stdin, stdout) pair from the dcmodule
    # helper; echo both streams back in the standard result envelope.
    with load_with_args() as _iotuple:
        _stdin, _stdout = _iotuple
        result_dump(True, data={
            "stdin": _stdin,
            "stdout": _stdout,
        })
|
# -*- coding:UTF8 -*-
import re
import traceback
# Read one line, extract every run of digits, and print the largest number.
try:
    n = input()
    pattern = re.compile(r'\d+')  # runs of digits
    numbers = [int(tok) for tok in pattern.findall(n)]
    # max() replaces sort-descending-then-take-first; with no digits it
    # raises ValueError, which (like the original IndexError) is reported
    # by the handler below.
    print(max(numbers))
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # propagate; the dead trailing `pass` was removed.
    traceback.print_exc()
import requests as req
def main():
    """Send a fixed greeting to the Telegram chat via the Bot API.

    NOTE(review): the bot token and chat id are embedded in the URL —
    rotate the token and move it to configuration.
    """
    URL_MENSAJE = "https://api.telegram.org/bot1943187472:AAHl6kFfARl1MiCIs09rEcADcZR0asEkIyY/sendMessage?chat_id=-589260794&text=Hola que tal"
    respuesta = req.get(URL_MENSAJE)
    if respuesta.status_code == 200:
        print("Mensaje enviado")
    else:
        print("ERROR al enviar mensaje")
if (__name__ == "__main__"):
    # Script entry point.
    main()
|
# -*- coding: utf-8 -*-
from flask import Flask, url_for
from flask import render_template
import pymysql
app = Flask(__name__)
class GetMysqlData(object):
    """Read-only helper around the local MySQL `test` database.

    NOTE(review): the connection/cursor opened here are never closed —
    consider adding close() or context-manager support at the call sites.
    """
    def __init__(self, table='douban_books_info'):
        self.con = pymysql.connect(host='127.0.0.1', port=3306, user="root", password="", db="test", charset='utf8mb4')
        self.cursor = self.con.cursor()
        self.table = table

    def get_high_score_data(self, limit_num=100):
        """Return the top `limit_num` books ordered by rating, descending.

        Fix: the LIMIT value is now bound as a query parameter instead of
        being interpolated into the SQL string; the table name still comes
        from the constructor (trusted caller), so only the limit needed it.
        """
        sql = ("select book_name,chinese_author,publisher,rating_nums "
               "from {} order by rating_nums desc limit %s").format(self.table)
        self.cursor.execute(sql, (int(limit_num),))
        return self.cursor.fetchall()
@app.route('/index/<int:num>')
def index(num):
    """Render the top-`num` (falls back to 100) highest-rated books."""
    rows = GetMysqlData().get_high_score_data(limit_num=num or 100)
    return render_template('base.html', results=rows)
@app.route('/login/<name>', methods=['GET', 'POST'])
def login(name):
    """Echo the login name (demo route)."""
    return 'login {}'.format(name)
if __name__ == '__main__':
    # Development server: listen on all interfaces, port 5001.
    app.run(host='0.0.0.0',port=5001)
|
from __future__ import print_function, division
import os
import time
import tensorflow as tf
import numpy as np
from .loss import get_loss, get_mean_iou
from .optimizer import get_optimizer
from utils.eval_segm import mean_IU
class Trainer(object):
"""
Trains a CU-Net instance
:param net: the CU-Net-net instance to train
:param opt_kwargs: (optional) kwargs passed to the optimizer
:param loss_kwargs: (optional) kwargs passed to the loss function
"""
    def __init__(self, net, opt_kwargs={}, loss_kwargs={}):
        # NOTE(review): mutable default args are shared across calls; they
        # are only read here, but explicit dicts at call sites are safer.
        self.net = net
        # 4-D float label placeholder whose last axis is n_class (the first
        # three are presumably batch/height/width — confirm with the net).
        self.label = tf.placeholder("float", shape=[None, None, None, self.net.n_class])
        # self.label_class = tf.argmax(self.label, axis=-1)
        self.label_class = self.label
        # Global step fed in from the training loop (drives LR schedules).
        self.global_step = tf.placeholder(tf.int64)
        self.opt_kwargs = opt_kwargs
        self.loss_kwargs = loss_kwargs
        # Loss selection defaults to cross-entropy when unspecified.
        self.loss_type = loss_kwargs.get("loss_name", "cross_entropy")
    def _initialize(self, batch_steps_per_epoch, output_path):
        # Build the loss/metric/optimizer graph ops and prepare the output
        # directory; returns the global-variables init op for the session.
        self.loss, self.final_loss = get_loss(self.net.logits, self.label, self.loss_kwargs)
        self.acc, self.acc_update = get_mean_iou(self.net.predictor_class, self.label_class, num_class=self.net.n_class, ignore_class_id=0)
        # Isolate the variables stored behind the scenes by the metric operation
        running_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope="m_metrics")
        self.reset_metrics_op = tf.variables_initializer(var_list=running_vars)
        self.optimizer, self.ema, self.learning_rate_node = get_optimizer(self.loss, self.global_step,
                                                                          batch_steps_per_epoch, self.opt_kwargs)
        init = tf.global_variables_initializer()
        if not output_path is None:
            # Create the checkpoint/output directory on demand.
            output_path = os.path.abspath(output_path)
            if not os.path.exists(output_path):
                print("Allocating '{:}'".format(output_path))
                os.makedirs(output_path)
        return init
def train(self, data_provider, output_path, restore_file=None, batch_steps_per_epoch=1024, epochs=250,
gpu_device='0', max_spat_dim=5000000):
"""
Launches the training process
:param data_provider:
:param output_path:
:param restore_path:
:param batch_size:
:param batch_steps_per_epoch:
:param epochs:
:param keep_prob:
:param gpu_device:
:param max_spat_dim:
:return:
"""
print("Epochs: " + str(epochs))
print("Batch Size Train: " + str(data_provider.batch_size_training))
print("Batchsteps per Epoch: " + str(batch_steps_per_epoch))
if not output_path is None:
save_path = os.path.join(output_path, "model")
if epochs == 0:
return save_path
init = self._initialize(batch_steps_per_epoch, output_path)
val_size = data_provider.size_validation
# gpu_options = tf.GPUOptions(visible_device_list=gpu_device)
# session_conf = tf.ConfigProto()
# session_conf.gpu_options.visible_device_list=gpu_device
# session_conf.gpu_options.per_process_gpu_memory_fraction = 0.4
gpu_options = tf.GPUOptions(visible_device_list=str(gpu_device), per_process_gpu_memory_fraction=0.7)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
# with tf.Session(config=session_conf) as sess:
sess.run(init)
sess.run(tf.local_variables_initializer())
if restore_file != None:
print("Loading Checkpoint.")
self.net.restore(sess, restore_file)
else:
print("Starting from scratch.")
print("Start optimization")
bestAcc = 1111110.0
shown_samples = 0
for epoch in range(epochs):
total_loss = 0
total_loss_final = 0
total_acc = 0
lr = 0
time_step_train = time.time()
for step in range((epoch * batch_steps_per_epoch), ((epoch + 1) * batch_steps_per_epoch)):
sess.run(self.reset_metrics_op)
batch_img, batch_mask = data_provider.next_data('training')
skipped = 0
if batch_img is None:
print("No Training Data available. Skip Training Path.")
break
while batch_img.shape[1] * batch_img.shape[2] > max_spat_dim:
batch_img, batch_mask = data_provider.next_data('training')
skipped = skipped + 1
if skipped > 100:
print("Spatial Dimension of Training Data to high. Aborting.")
return save_path
# Run training
if self.final_loss is not None:
_, loss, final_loss, acc, lr = sess.run \
([self.optimizer, self.loss, self.final_loss, self.acc_update, self.learning_rate_node],
feed_dict={self.net.input_tensor: batch_img,
self.label: batch_mask,
self.global_step: step})
total_loss_final += final_loss
else:
_, loss, acc, lr = sess.run \
([self.optimizer, self.loss, self.acc_update, self.learning_rate_node],
feed_dict={self.net.input_tensor: batch_img,
self.label: batch_mask,
self.global_step: step})
acc = sess.run(self.acc)
shown_samples = shown_samples + batch_img.shape[0]
if self.loss_type is "cross_entropy_sum":
shape = batch_img.shape
loss /= shape[1] * shape[2] * shape[0]
total_loss += loss
total_acc += acc
total_loss = total_loss / batch_steps_per_epoch
total_loss_final = total_loss_final / batch_steps_per_epoch
total_acc = total_acc / batch_steps_per_epoch
time_used = time.time() - time_step_train
train_total_loss = total_loss
self.output_epoch_stats_train(epoch + 1, total_loss, total_loss_final, total_acc, shown_samples, lr, time_used)
### VALIDATION
total_loss = 0
total_loss_final = 0
total_acc = 0
total_m_iou = 0
time_step_val = time.time()
for step in range(0, val_size):
sess.run(self.reset_metrics_op)
batch_img, batch_mask = data_provider.next_data('validation')
if batch_img is None:
print("No Validation Data available. Skip Validation Path.")
break
# Run validation
if self.final_loss is not None:
loss, final_loss, acc, batch_pred = sess.run([self.loss, self.final_loss, self.acc_update, self.net.predictor],
feed_dict={self.net.input_tensor: batch_img, self.label: batch_mask})
total_loss_final += final_loss
else:
loss, acc, batch_pred = sess.run([self.loss, self.acc_update, self.net.predictor],
feed_dict={self.net.input_tensor: batch_img, self.label: batch_mask})
acc = sess.run(self.acc)
iou_list = []
for pred, label in zip(batch_pred, batch_mask):
pred = np.argmax(pred, axis=-1)
mask = np.argmax(label, axis=-1)
iou = mean_IU(pred, mask)
iou_list.append(iou)
m_iou = np.mean(iou_list)
total_m_iou += m_iou
if self.loss_type is "cross_entropy_sum":
shape = batch_img.shape
loss /= shape[1] * shape[2] * shape[0]
total_loss += loss
total_acc += acc
if val_size != 0:
total_loss = total_loss / val_size
total_loss_final = total_loss_final / val_size
total_acc = total_acc / val_size
total_m_iou /= val_size
time_used = time.time() - time_step_val
self.output_epoch_stats_val(epoch + 1, total_loss, total_loss_final, total_acc, total_m_iou, time_used)
data_provider.restart_val_runner()
if not output_path is None:
if total_loss <= bestAcc: #or (epoch + 1) % 8 == 0:
# if total_acc > bestAcc:
bestAcc = total_loss
save_pathAct = save_path + str(epoch + 1)
print("Saving checkpoint")
self.net.save(sess, save_pathAct)
data_provider.stop_all()
print("Optimization Finished!")
print("Best Val Loss: " + str(bestAcc))
return save_path
def output_epoch_stats_train(self, epoch, total_loss, total_loss_final, acc, shown_sample, lr, time_used):
print(
"TRAIN: Epoch {:}, Average loss: {:.6f} final: {:.6f} acc: {:.4f}, training samples shown: {:}, learning rate: {:.6f}, time used: {:.2f}".format(
epoch, total_loss, total_loss_final, acc, shown_sample, lr, time_used))
def output_epoch_stats_val(self, epoch, total_loss, total_loss_final, acc, m_iou, time_used):
print(
"VAL: Epoch {:}, Average loss: {:.6f} final: {:.6f} acc: {:.4f} mIoU: {:.4f}, time used: {:.2f}".format(epoch, total_loss,
total_loss_final, acc, m_iou, time_used)) |
from InfopulseWebChatApp.models import ChatUser, Ban
class ChatUserService:
    """Service-layer helpers for ChatUser persistence, login and ban checks."""

    @staticmethod
    def save_user(user_form):
        """Validate the registration form and persist a new ChatUser.

        Returns True on success, False on an invalid form or a save error.
        SECURITY NOTE(review): the password is stored as-is — hash it
        (e.g. with Django's make_password) before persisting.
        """
        if not user_form.is_valid():
            return False
        user_name = user_form.cleaned_data["name"]
        user_login = user_form.cleaned_data["login"]
        user_password = user_form.cleaned_data["password"]
        # role_id_id=1: default role for newly registered users.
        chat_user = ChatUser(name=user_name, login=user_login, password=user_password, role_id_id=1)
        try:
            chat_user.save()
            return True
        except ValueError:
            return False

    @staticmethod
    def verify_credentials(userlogin, userpassword):
        """Return the matching ChatUser, or None when login/password are wrong."""
        # select * from chat_users where login=userlogin and password=userpassword
        # .first() already yields None when there is no match.
        return ChatUser.objects.filter(login=userlogin, password=userpassword).first()

    @staticmethod
    def ban_verify(user):
        """Return True when a Ban record exists for *user*."""
        # exists() avoids fetching the row just to test for presence.
        return Ban.objects.filter(sender_id=user).exists()
|
from dejmps import dejmps_protocol_bob, get_fidelity_phi00
from netqasm.sdk import EPRSocket
from netqasm.sdk.external import NetQASMConnection, Socket, get_qubit_state
def main(app_config=None):
    """Bob's side of the DEJMPS entanglement-distillation application.

    Receives two EPR pairs from Alice, runs the DEJMPS protocol on them and,
    on success, reports the fidelity of the surviving pair with |phi00>.

    :param app_config: netqasm app configuration; must provide ``app_name``
        (NOTE(review): the default ``None`` crashes at ``app_config.app_name``
        — presumably the netqasm runner always supplies a config; confirm).
    :return: dict with key ``fidelity`` (float on success, None on failure)
    """
    # Create a socket for classical communication
    classical_socket = Socket("bob", "alice")
    # Create a EPR socket for entanglement generation
    epr_socket = EPRSocket("alice")
    # Initialize Bob's NetQASM connection
    bob = NetQASMConnection(
        app_name=app_config.app_name,
        epr_sockets=[epr_socket]
    )
    with bob:
        # Receive EPR Pairs
        q = epr_socket.recv(number=2)
        q1, q2 = q[0], q[1]
        print("Bob received the EPR pairs")
        # Apply 3->1 method and print success result
        print("Bob is running the dejmps protocol...")
        if dejmps_protocol_bob(q1, q2, bob, classical_socket):
            print("Bob successfully created an EPR Pair with Alice")
            # reduced_dm=False: full (not single-qubit reduced) state —
            # TODO confirm the shape matches what get_fidelity_phi00 expects.
            qubit_state = get_qubit_state(q1, reduced_dm=False)
            fidelity = float(get_fidelity_phi00(qubit_state))
        else:
            print("Bob failed to created an EPR Pair with Alice")
            fidelity = None
    return {
        "fidelity": fidelity
    }
if __name__ == "__main__":
main()
|
import re
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier
import pickle
import sys
from sqlalchemy import create_engine
import nltk
nltk.download(['punkt', 'wordnet','stopwords','averaged_perceptron_tagger'])
from nltk.corpus import stopwords
from sklearn.model_selection import GridSearchCV
def load_data(database_filepath):
    """
    Load the cleaned messages table from a SQLite database.

    Inputs:
        database_filepath: String. Filepath for the db file containing the cleaned data.
    Output:
        X: series. Message texts (features).
        y: dataframe. Category labels.
        category_names: list of strings. Label column names.
    """
    engine = create_engine('sqlite:///' + database_filepath)
    frame = pd.read_sql_table('messages_cat', engine)
    X = frame['message']
    # Drop the non-label columns first, then the categories excluded from training.
    non_labels = ['message', 'genre', 'id', 'original']
    excluded = ["related", "other_infrastructure", "other_weather", "other_aid", "direct_report", "weather_related"]
    y = frame.drop(non_labels, axis=1).drop(columns=excluded)
    return X, y, list(y.columns)
def tokenize(text):
    """
    Normalize, tokenize and lemmatize text.

    Input:
        text: string. Sentence containing a message.
    Output:
        tokens: list of strings. Normalized, lemmatized tokens with English
        stop words removed.
    """
    # creating stop words
    stop_words = set(stopwords.words("english"))
    # normalize case and remove punctuation
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
    # tokenize text
    tokens = word_tokenize(text)
    lemmatizer = WordNetLemmatizer()
    # lemmatize and remove stop words
    tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]
    return tokens
class StartVerbExtractor(BaseEstimator, TransformerMixin):
    """Binary feature: does any sentence of a message start with a verb (or 'RT')?"""

    def start_verb(self, text):
        """Return 1 if any sentence in *text* begins with a verb or 'RT', else 0."""
        sentence_list = nltk.sent_tokenize(text)
        for sentence in sentence_list:
            # POS-tag the normalized tokens of this sentence.
            pos_tags = nltk.pos_tag(tokenize(sentence))
            if len(pos_tags) != 0:
                first_word, first_tag = pos_tags[0]
                # VB = base-form verb, VBP = non-3rd-person present.
                if first_tag in ['VB', 'VBP'] or first_word == 'RT':
                    return 1
        return 0

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        # Apply start_verb element-wise; return a single-column DataFrame.
        X_tag = pd.Series(X).apply(self.start_verb)
        return pd.DataFrame(X_tag)
def build_model():
    """
    Build the ML pipeline (bag-of-words + tf-idf + start-verb flag, random
    forest per category) wrapped in a grid search.

    Args:
        None
    Returns:
        cv: gridsearchcv object.
    """
    # Text branch: token counts followed by tf-idf weighting.
    text_pipeline = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
    ])
    # Combine the text features with the start-verb indicator.
    features = FeatureUnion([
        ('text_pipeline', text_pipeline),
        ('starting_verb', StartVerbExtractor()),
    ])
    pipeline = Pipeline([
        ('features', features),
        ('clf', MultiOutputClassifier(RandomForestClassifier())),
    ])
    param_grid = {
        'clf__estimator__n_estimators': [100, 200],
        'clf__estimator__random_state': [42],
    }
    return GridSearchCV(pipeline, param_grid=param_grid, refit=True,
                        verbose=1, return_train_score=True, n_jobs=2)
def evaluate_model(model, X_test, y_test, category_names):
    """
    Print per-category precision, recall and F1 score for the fitted model.

    Inputs:
        model: fitted model object exposing ``predict``.
        X_test: test features.
        y_test: pandas dataframe containing test labels.
        category_names: list of strings containing category names.
    Returns:
        None
    """
    y_pred = model.predict(X_test)
    print(classification_report(y_test, y_pred, target_names=category_names))
def save_model(model, model_filepath):
    """Pickle the best estimator found by the grid search to *model_filepath*.

    Inputs:
        model: fitted GridSearchCV-like object exposing ``best_estimator_``.
        model_filepath: destination path for the pickle file.
    """
    # ``with`` closes the handle even if pickling fails; the original left
    # the file object dangling until garbage collection.
    with open(model_filepath, 'wb') as f:
        pickle.dump(model.best_estimator_, f)
def main():
    """CLI driver: load data, train, evaluate and pickle the classifier.

    Usage: train_classifier.py <database_filepath> <model_filepath>
    """
    if len(sys.argv) != 3:
        # Guard clause: wrong argument count -> usage message and exit.
        print('Please provide the filepath of the disaster messages database '\
              'as the first argument and the filepath of the pickle file to '\
              'save the model to as the second argument. \n\nExample: python '\
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
        return
    database_filepath = sys.argv[1]
    model_filepath = sys.argv[2]
    print('Loading data...\n DATABASE: {}'.format(database_filepath))
    X, Y, category_names = load_data(database_filepath)
    # Hold out 20% of the messages for evaluation.
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
    print('Building model...')
    model = build_model()
    print('Training model...')
    model.fit(X_train, Y_train)
    print('Evaluating model...')
    evaluate_model(model, X_test, Y_test, category_names)
    print('Saving model...\n MODEL: {}'.format(model_filepath))
    save_model(model, model_filepath)
    print('Trained model saved!')


if __name__ == '__main__':
    main()
#!/usr/bin/env python3
# import of built-in modules
import datetime
import os
import sys
# import of third party modules
# None
# import of local modules
import DeDriftAndResampleHCP7T_OneSubjectCompletionChecker
import hcp.hcp7t.archive as hcp7t_archive
import hcp.hcp7t.subject as hcp7t_subject
import utils.file_utils as file_utils
# authorship information
__author__ = "Timothy B. Brown"
__copyright__ = "Copyright 2016, The Human Connectome Project"
__maintainer__ = "Timothy B. Brown"
_PROJECT = 'HCP_1200'
def _inform(msg):
    """Print *msg* prefixed by this module's file name."""
    prefix = os.path.basename(__file__)
    print("{}: {}".format(prefix, msg))
def _is_subject_complete(subject_results_dict):
    """Return True iff no scan entry reports files_exist == 'FALSE'."""
    return all(scan_results['files_exist'] != 'FALSE'
               for scan_results in subject_results_dict.values())
def _write_subject_info(subject, subject_results_dict, afile):
    """Write one tab-separated status line per scan to *afile* and echo it.

    Scans are emitted in sorted scan-name order; a blank line on stdout
    separates one subject from the next.
    """
    for scan, scan_results in sorted(subject_results_dict.items()):
        # Columns: project, subject, resource, scan, exists, date, files-exist.
        output_str = _PROJECT + '\t' + subject.subject_id + '\t'
        output_str += scan_results['resource_name'] + '\t'
        output_str += scan_results['scan_name'] + '\t'
        output_str += scan_results['resource_exists'] + '\t'
        output_str += scan_results['resource_date'] + '\t'
        output_str += scan_results['files_exist']
        afile.write(output_str + os.linesep)
        print(output_str)
    print("")
def should_check(subject, scan, archive):
    """Decide whether this scan's DeDrift resource should be checked.

    The combined retinotopy scan only counts when all 6 unprocessed
    retinotopy directories are present; any other scan is checked when its
    unprocessed functional resource exists.
    """
    combined_retinotopy = 'tfMRI_7T_RETCCW_AP_RETCW_PA_RETEXP_AP_RETCON_PA_RETBAR1_AP_RETBAR2_PA'
    if scan != combined_retinotopy:
        return archive.does_functional_unproc_exist(subject, scan)
    return len(archive.available_retinotopy_unproc_dirs(subject)) == 6
if __name__ == "__main__":
# Get list of subjects to check
subject_file_name = file_utils.get_subjects_file_name(__file__)
_inform("Retrieving subject list from: " + subject_file_name)
subject_list = hcp7t_subject.read_subject_info_list(subject_file_name)
# Create list of scan names to check
dedrift_scan_names_list = []
dedrift_scan_names_list.append('rfMRI_REST1_PA')
dedrift_scan_names_list.append('rfMRI_REST2_AP')
dedrift_scan_names_list.append('rfMRI_REST3_PA')
dedrift_scan_names_list.append('rfMRI_REST4_AP')
dedrift_scan_names_list.append('tfMRI_MOVIE1_AP')
dedrift_scan_names_list.append('tfMRI_MOVIE2_PA')
dedrift_scan_names_list.append('tfMRI_MOVIE3_PA')
dedrift_scan_names_list.append('tfMRI_MOVIE4_AP')
dedrift_scan_names_list.append('tfMRI_RETBAR1_AP')
dedrift_scan_names_list.append('tfMRI_RETBAR2_PA')
dedrift_scan_names_list.append('tfMRI_RETCCW_AP')
dedrift_scan_names_list.append('tfMRI_RETCON_PA')
dedrift_scan_names_list.append('tfMRI_RETCW_PA')
dedrift_scan_names_list.append('tfMRI_RETEXP_AP')
dedrift_scan_names_list.append('tfMRI_7T_RETCCW_AP_RETCW_PA_RETEXP_AP_RETCON_PA_RETBAR1_AP_RETBAR2_PA')
# open complete and incomplete files for writing
complete_file = open(_PROJECT + '.complete.status', 'w')
incomplete_file = open(_PROJECT + '.incomplete.status', 'w')
# Create archive
archive = hcp7t_archive.Hcp7T_Archive()
# Create DeDriftAndResampleHCP7T One subject completion checker
completion_checker = DeDriftAndResampleHCP7T_OneSubjectCompletionChecker.DeDriftAndResampleHCP7T_OneSubjectCompletionChecker()
# Check completion for listed subjects
for subject in subject_list:
subject_results_dict = dict()
for scan_name in dedrift_scan_names_list:
scan_results_dict = dict()
# Should we check for the MSMAllDeDrift resource
if should_check(subject, scan_name, archive):
# does the DeDriftAndResample resource exist?
if completion_checker.does_processed_resource_exist(archive, subject):
dedrift_resource_exists = "TRUE"
timestamp = os.path.getmtime(archive.DeDriftAndResample_processed_dir_name(subject))
dedrift_resource_date = datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
if completion_checker.is_processing_complete(archive, subject, scan_name):
files_exist = "TRUE"
else:
files_exist = "FALSE"
else:
dedrift_resource_exists = "FALSE"
dedrift_resource_date = "N/A"
files_exist = "FALSE"
else:
# unprocessed resource does not exist
dedrift_resource_exists = "---"
dedrift_resource_date = "---"
files_exist = "---"
scan_results_dict['resource_name'] = archive.DEDRIFT_AND_RESAMPLE_RESOURCE_NAME
scan_results_dict['resource_exists'] = dedrift_resource_exists
scan_results_dict['resource_date'] = dedrift_resource_date
scan_results_dict['files_exist'] = files_exist
scan_results_dict['scan_name'] = scan_name
subject_results_dict[scan_name] = scan_results_dict
if _is_subject_complete(subject_results_dict):
_write_subject_info(subject, subject_results_dict, complete_file)
else:
_write_subject_info(subject, subject_results_dict, incomplete_file)
|
import numpy as np
import matplotlib.pyplot as plt
class ImagesHelper:
    """Static helpers: convert a PIL bitmap into a flat 0/1 mask and plot masks."""

    def __init__(self):
        pass

    @staticmethod
    def get_bit_mask_from_bitmap(image):
        """Return a flat numpy array (width*height) of 0/1 flags for *image*.

        A pixel is marked 1 when any of its first three channels (or its
        value, for greyscale int pixels) exceeds PIXEL_COLOR_LIMIT.
        NOTE(review): pixels are read as ``pixels[j, i]`` while ``i`` iterates
        ``image.size[0]`` and ``j`` iterates ``image.size[1]``; PIL pixel
        access is indexed ``[x, y]``, so this looks transposed — confirm the
        intended orientation with callers before relying on it.
        """
        PIXEL_COLOR_LIMIT = 10
        pixels = image.load()
        bitMask = np.zeros(image.size[0] * image.size[1])
        for i in range(0, image.size[0]):
            for j in range(0, image.size[1]):
                pixel = pixels[j, i]
                # Greyscale images yield a bare int per pixel.
                if (isinstance(pixel, int)):
                    if PIXEL_COLOR_LIMIT < pixel:
                        bitMask[i * image.size[1] + j] = 1
                    continue;
                # Multi-channel pixel: bright enough in any of R, G or B.
                if (PIXEL_COLOR_LIMIT < pixel[0] or PIXEL_COLOR_LIMIT < pixel[1] or PIXEL_COLOR_LIMIT < pixel[2]):
                    bitMask[i * image.size[1] + j] = 1
        return bitMask

    @staticmethod
    def plot_bit_mask(bit_mask, width, height):
        """Show *bit_mask* (flat array) as a width-x-height greyscale image."""
        image = np.asarray(bit_mask).reshape(width, height)
        figure = plt.figure()
        subplot = figure.add_subplot(111)
        subplot.imshow(image, cmap='Greys_r')
        plt.show()

    @staticmethod
    def subplot_bit_mask(bit_mask, width, height, subplot_args, show_plot = False):
        """Draw *bit_mask* into the subplot given by *subplot_args*;
        optionally display the figure immediately."""
        image = np.asarray(bit_mask).reshape(width, height)
        plt.subplot(subplot_args)
        plt.imshow(image, cmap='Greys_r')
        if show_plot:
            plt.show()
|
import unittest
import unittest.mock as mock
import splendor_sim.interfaces.coin.i_coin_type as i_coin_type
import splendor_sim.interfaces.game_state.i_game_state as i_game_state
import splendor_sim.interfaces.player.i_player as i_player
import splendor_sim.interfaces.player.i_player_card_inventory as i_player_card_inventory
import splendor_sim.interfaces.player.i_player_sponsor_inventory as i_player_sponsor_inventory
import splendor_sim.interfaces.sponsor.i_sponsor as i_sponsor
import splendor_sim.interfaces.sponsor.i_sponsor_reserve as i_sponsor_reserve
import splendor_sim.src.action.purchase_sponsor_action as purchase_sponsor_action
class TestPurchaseSponsorAction(unittest.TestCase):
    """Unit tests for the sponsor-purchase action using autospec'd mocks.

    NOTE(review): every test instantiates
    ``purchase_sponsor_action.PurchaseCardAction`` — given that the module and
    these test names all refer to *sponsor* purchases, confirm the class is
    not meant to be named ``PurchaseSponsorAction``.
    """

    def setUp(self):
        # Three coin types; every sponsor costs 3 of each of them.
        self._mock_coin_types = [
            mock.create_autospec(spec=i_coin_type.ICoinType, spec_set=True)
            for _ in range(3)
        ]
        self._mock_sponsors = [
            mock.create_autospec(spec=i_sponsor.ISponsor, spec_set=True)
            for _ in range(3)
        ]
        self._mock_sponsors_cost = {
            self._mock_coin_types[0]: 3,
            self._mock_coin_types[1]: 3,
            self._mock_coin_types[2]: 3,
        }
        for sponsor in self._mock_sponsors:
            sponsor.get_cost.return_value = self._mock_sponsors_cost
        # Player whose card discounts exactly cover any sponsor's cost.
        self._mock_player = mock.create_autospec(spec=i_player.IPlayer, spec_set=True)
        self._mock_card_inventory = mock.create_autospec(
            spec=i_player_card_inventory.IPlayerCardInventory, spec_set=True
        )
        self._mock_card_inventory.get_total_discount.return_value = {
            self._mock_coin_types[0]: 3,
            self._mock_coin_types[1]: 3,
            self._mock_coin_types[2]: 3,
        }
        self._mock_player.get_card_inventory.return_value = self._mock_card_inventory
        self._mock_sponsor_inventory = mock.create_autospec(
            spec=i_player_sponsor_inventory.IPlayerSponsorInventory, spec_set=True
        )
        self._mock_player.get_sponsor_inventory.return_value = (
            self._mock_sponsor_inventory
        )
        # Game state whose reserve still holds sponsors 0 and 1 (not 2).
        self._mock_game_state = mock.create_autospec(
            spec=i_game_state.IGameState, spec_set=True
        )
        self._mock_sponsor_reserve = mock.create_autospec(
            spec=i_sponsor_reserve.ISponsorReserve, spec_set=True
        )
        self._mock_sponsor_reserve.get_remaining_sponsor_set.return_value = {
            self._mock_sponsors[0],
            self._mock_sponsors[1],
        }
        self._mock_game_state.get_sponsor_reserve.return_value = (
            self._mock_sponsor_reserve
        )

    def test_purchase_sponsor_action_init_valid(self):
        # Arrange
        test_action = purchase_sponsor_action.PurchaseCardAction(
            self._mock_player, self._mock_sponsors[0]
        )
        # Act
        # Assert
        self.assertTrue(test_action.validate(self._mock_game_state))

    def test_purchase_sponsor_action_validate_false_sponsor_not_available(self):
        # Arrange: the requested sponsor is no longer in the reserve.
        self._mock_sponsor_reserve.get_remaining_sponsor_set.return_value = {
            self._mock_sponsors[1],
            self._mock_sponsors[2],
        }
        test_action = purchase_sponsor_action.PurchaseCardAction(
            self._mock_player, self._mock_sponsors[0]
        )
        # Act
        # Assert
        self.assertFalse(test_action.validate(self._mock_game_state))

    def test_purchase_sponsor_action_validate_false_player_cant_afford(self):
        # Arrange: one discount short of the sponsor's cost.
        self._mock_card_inventory.get_total_discount.return_value = {
            self._mock_coin_types[0]: 3,
            self._mock_coin_types[1]: 3,
            self._mock_coin_types[2]: 2,
        }
        test_action = purchase_sponsor_action.PurchaseCardAction(
            self._mock_player, self._mock_sponsors[0]
        )
        # Act
        # Assert
        self.assertFalse(test_action.validate(self._mock_game_state))

    def test_purchase_sponsor_action_validate_false_player_cant_afford_missing_coin_type(
        self
    ):
        # Arrange: one required coin type entirely absent from the discounts.
        self._mock_card_inventory.get_total_discount.return_value = {
            self._mock_coin_types[0]: 3,
            self._mock_coin_types[2]: 3,
        }
        test_action = purchase_sponsor_action.PurchaseCardAction(
            self._mock_player, self._mock_sponsors[0]
        )
        # Act
        # Assert
        self.assertFalse(test_action.validate(self._mock_game_state))

    def test_purchase_sponsor_action_execute_invalid_sponsor_not_available(self):
        # Arrange
        self._mock_sponsor_reserve.get_remaining_sponsor_set.return_value = {
            self._mock_sponsors[1],
            self._mock_sponsors[2],
        }
        test_action = purchase_sponsor_action.PurchaseCardAction(
            self._mock_player, self._mock_sponsors[0]
        )
        # Act
        # Assert: executing an invalid action must raise, not silently no-op.
        with self.assertRaises(ValueError):
            test_action.execute(self._mock_game_state)

    def test_purchase_sponsor_action_execute_invalid_player_cant_afford(self):
        # Arrange
        self._mock_card_inventory.get_total_discount.return_value = {
            self._mock_coin_types[0]: 3,
            self._mock_coin_types[1]: 3,
            self._mock_coin_types[2]: 2,
        }
        test_action = purchase_sponsor_action.PurchaseCardAction(
            self._mock_player, self._mock_sponsors[0]
        )
        # Act
        # Assert
        with self.assertRaises(ValueError):
            test_action.execute(self._mock_game_state)

    def test_purchase_sponsor_action_execute_invalid_player_cant_afford_missing_coin_type(
        self
    ):
        # Arrange
        self._mock_card_inventory.get_total_discount.return_value = {
            self._mock_coin_types[0]: 3,
            self._mock_coin_types[2]: 3,
        }
        test_action = purchase_sponsor_action.PurchaseCardAction(
            self._mock_player, self._mock_sponsors[0]
        )
        # Act
        # Assert
        with self.assertRaises(ValueError):
            test_action.execute(self._mock_game_state)

    def test_purchase_sponsor_action_execute_player_gets_sponsor(self):
        # Arrange
        test_action = purchase_sponsor_action.PurchaseCardAction(
            self._mock_player, self._mock_sponsors[0]
        )
        # Act
        test_action.execute(self._mock_game_state)
        # Assert
        self._mock_sponsor_inventory.add_sponsor.assert_called_once_with(
            self._mock_sponsors[0]
        )

    def test_purchase_sponsor_action_execute_sponsor_removed_from_reserve(self):
        # Arrange
        test_action = purchase_sponsor_action.PurchaseCardAction(
            self._mock_player, self._mock_sponsors[0]
        )
        # Act
        test_action.execute(self._mock_game_state)
        # Assert
        self._mock_sponsor_reserve.remove_sponsor.assert_called_once_with(
            self._mock_sponsors[0]
        )
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
class Net(nn.Module):
    """Thin module wrapper; ``layer1`` itself comes from the pickled checkpoint."""

    def forward(self, x):
        # Delegate straight to the (deserialized) first layer.
        return self.layer1(x)
# Load the full pickled model (architecture + weights), not just a state_dict.
Network = torch.load('CNN-L22-100.net')
Network.eval()
# Closed-loop rollout length: 9900 autoregressive prediction steps.
Ts = 10000-100
NX = 64
# 1-D Kuramoto–Sivashinsky-style trajectory (presumably, from the file name);
# keep only the tail starting at index 10100 — TODO confirm against the data file.
data = np.fromfile('1d_ks_L22.dat').reshape([20001,1,NX])[10100:]
recon_x_sum = np.zeros((Ts,1,NX))
Initial = data[0].reshape([1,1,NX])
input_ = Initial
for i in range(Ts):
    # Feed the previous prediction back in (autoregressive rollout; requires CUDA).
    # NOTE(review): ``input`` shadows the builtin of the same name.
    input = Variable(torch.from_numpy(input_).float()).cuda()
    x_reconst = Network(input)
    input_ = x_reconst.cpu().data.numpy()
    recon_x_sum[i:i+1] = x_reconst.cpu().data.numpy()
    del x_reconst
    print(i)
recon_x_sum.tofile('recon_CNN_L22.dat')
|
# -*- coding: utf-8 -*-
import KBEngine
from KBEDebug import *
import const
import utility
import json
import switch
import x42
import copy
from roomParamsHelper import roomParamsChecker, roomParamsGetter
class iRoomOperation(object):
    """ Player game-related operations: room creation, entering, game records. """

    def __init__(self):
        self.room = None
        # Reject a second create/enter request while one is already in flight.
        self.req_entering_room = False

    def createRoom(self, game_type, create_json):
        """Validate the client's create request, verify the card/diamond
        balance via the account service, then create and enter the room."""
        create_dict = None
        try:
            create_dict = json.loads(create_json)
        except:
            return
        DEBUG_MSG("create room args = {}".format(create_dict))
        create_dict['room_type'] = const.NORMAL_ROOM
        if not roomParamsChecker(game_type, create_dict):
            return
        if self.req_entering_room:
            return
        if self.cell is not None:
            self.createRoomFailed(const.CREATE_FAILED_ALREADY_IN_ROOM)
            return
        self.req_entering_room = True

        def callback(content):
            # Response from the account service; a JSON object string is expected.
            if self.isDestroyed:
                return
            if content is None:
                DEBUG_MSG("createRoom callback error: content is None, user id {}".format(self.userId))
                self.createRoomFailed(const.CREATE_FAILED_NET_SERVER_ERROR)
                return
            try:
                DEBUG_MSG("cards response: {}".format(content))
                if content[0] != '{':
                    self.createRoomFailed(const.CREATE_FAILED_NET_SERVER_ERROR)
                    return
                data = json.loads(content)
                card_cost, diamond_cost = utility.calc_cost(game_type, create_dict)
                # NOTE(review): uses 'and', so creation is refused only when the
                # player lacks BOTH cards and diamonds — confirm that is intended.
                if card_cost > data["card"] and diamond_cost > data["diamond"]:
                    self.createRoomFailed(const.CREATE_FAILED_NO_ENOUGH_CARDS)
                    return
                params = {
                    'owner_uid' : self.userId,
                    'club_id' : 0,
                }
                params.update(roomParamsGetter(game_type, create_dict))
                room = x42.GW.createRoom(game_type, params)
                if room:
                    self.createRoomSucceed(room)
                else:
                    self.createRoomFailed(const.CREATE_FAILED_OTHER)
            except:
                import traceback
                ERROR_MSG("createRoom callback content = {} error:{}".format(content, traceback.format_exc()))
                self.createRoomFailed(const.CREATE_FAILED_OTHER)

        if switch.DEBUG_BASE or x42.GW.isDailyActFree:
            # Debug / free-activity mode: pretend the player has ample currency.
            callback('{"card":99, "diamond":9999}')
        else:
            utility.get_user_info(self.accountName, callback)

    def createRoomSucceed(self, room):
        # Remember the room and enter it as its owner.
        self.room = room
        room.enterRoom(self, True)

    def createRoomFailed(self, err):
        # Clear the in-flight flag and notify the client of the error code.
        self.req_entering_room = False
        if self.hasClient:
            self.client.createRoomFailed(err)

    # c2s (client-to-server entry point)
    def enterRoom(self, roomID):
        """Ask the gateway to place this player into an existing room."""
        if self.req_entering_room:
            DEBUG_MSG("iRoomOperation: enterRoom failed; entering or creating room")
            return
        if self.cell is not None:
            self.enterRoomFailed(const.ENTER_FAILED_ALREADY_IN_ROOM)
            return
        self.req_entering_room = True
        x42.GW.enterRoom(roomID, self)

    def enterRoomSucceed(self, room):
        self.room = room

    def enterRoomFailed(self, err):
        self.req_entering_room = False
        if self.hasClient:
            self.client.enterRoomFailed(err)

    def leaveRoomSucceed(self):
        self.req_entering_room = False
        self.room = None

    def saveGameResult(self, json_r):
        # Save the player's per-room game record; keep only the latest n entries.
        DEBUG_MSG("saveGameResult: {}".format(len(self.game_history)))
        self.game_history.append(json_r)
        self.game_history = self.game_history[-const.MAX_HISTORY_RESULT:]
        self.writeToDB()

    def getPageGameHistory(self, page, size, filter=None, order=None):
        """Push one page of the newest-first game history to the client.

        NOTE(review): ``filter`` and ``order`` are accepted but unused.
        """
        game_history = copy.deepcopy(self.game_history)
        game_history.reverse()
        if size is not None:
            game_history = game_history[page * size : min(page * size + size, len(game_history))]
        if self.hasClient:
            self.client.pushPageGameRecordList(game_history, page, size, len(self.game_history))

    def get_simple_client_dict(self):
        """Return a minimal public profile of this player for client display."""
        return {
            'head_icon': self.head_icon,
            'nickname': self.name,
            'sex': self.sex,
            'userId': self.userId,
            'online': 1 if self.hasClient else 0
        }

    def inviteClubMemberRoom(self, member_list):
        """Invite the given club members into the player's current room."""
        if self.room is None or len(member_list) <= 0:
            return
        userInfo = {
            'head_icon': self.head_icon,
            'name': self.name,
            'sex': self.sex,
            'userId': self.userId,
        }
        self.room.inviteClubMemberRoom(self, userInfo, member_list)

    def chargeEffect(self):
        """Deduct the cost of a paid effect, then tell the client to refresh."""
        card_cost = 1
        diamond_cost = 9999
        def pay_callback(content):
            # Only refresh the client when the service returned a JSON object.
            INFO_MSG("player charge effect userId:{} account:{} content:{}".format(self.userId, self.accountName, content))
            if content is not None and content[0] == '{':
                if self.client:
                    self.client.client_update_card_diamond()
        utility.update_card_diamond(self.accountName, -card_cost, -diamond_cost, pay_callback, "player {} pay effect".format(self.userId))
|
from discord_webhook import DiscordWebhook, DiscordEmbed
class discord_data:
    """Holder for webhook configuration values."""
    # Webhook endpoint URL, loaded from config['discord']['webhook_url'].
    url = None
    # Integration display name from config['discord']['name'].
    name = None
class Message:
    """Plain data object describing one embed to send to Discord."""
    # Embed title.
    header = ""
    # Embed description (main body text).
    content = ""
    # Shown as the embed's author name.
    user = ""
    # Footer text (a timestamp is appended when sent).
    footer = ""
    # Embed accent colour (hex RGB).
    color = 0xc8702a
class Discord:
    """Thin wrapper that forwards Message objects to a Discord webhook."""

    def __init__(self, config):
        """Read the webhook settings from ``config['discord']`` and build the webhook."""
        self.data = discord_data()
        self.data.url = config['discord']['webhook_url']
        self.data.name = config['discord']['name']
        self.webhook = DiscordWebhook(self.data.url)

    def send(self, message):
        """Send *message* (a ``Message``-like object) as a single embed.

        :raises RuntimeError: if the webhook was never initialized.
        """
        if self.webhook is None:
            # Original used ``assert False`` here, which is stripped under
            # ``python -O``; raise an explicit error instead (typo fixed too).
            raise RuntimeError("Discord was not initialized")
        embed = DiscordEmbed(title='%s' % (message.header), description='%s' % (message.content), color=message.color)
        embed.set_author(name=message.user)
        embed.set_footer(text=message.footer, ts=True)
        self.webhook.add_embed(embed)
        self.webhook.execute()
from .models import Utilisateur,Evenement
from .exceptions import Exception_sans_var, Exception_avec_var,Exception_participant
import re
def verifie_user(mail, mdp):
    """Raise Exception_sans_var(1000) unless a user with this email and
    password hash exists."""
    utilisateur = Utilisateur.objects.filter(email=mail, mdp_hashe=mdp).first()
    if utilisateur is None:
        raise Exception_sans_var(1000)
def verifie_mail(email):
    """Validate the e-mail format; raise Exception_avec_var(2001, email) if invalid."""
    motif = r'^[A-Za-z0-9]+([_|\.|-]{1}[A-Za-z0-9]+)*@[A-Za-z0-9]+([_|\.|-]{1}[A-Za-z0-9]+)*[\.]{1}[a-z]{2,6}$'
    if not re.match(motif, email):
        raise Exception_avec_var(2001, email)
def verifie_tel(numero):
    """Validate a French phone number (0 + 9 digits, optional ' ', '.' or '-'
    separators); raise Exception_avec_var(2002, numero) if invalid."""
    motif = r'^0[0-9]([ .-]?[0-9]{2}){4}$'
    if not re.match(motif, numero):
        raise Exception_avec_var(2002, numero)
def get_verifie_identifiant_user(identifiant):
    """Return the Utilisateur with this id; raise Exception_participant(1006, id)
    when no such user exists."""
    user = Utilisateur.objects.filter(id = identifiant).first()
    if user is None:
        raise Exception_participant(1006,identifiant)
    return user
def get_verifie_email_user(email):
    """Validate the e-mail format, then return the matching Utilisateur;
    raise Exception_avec_var(1003, email) when none exists."""
    verifie_mail(email)
    user = Utilisateur.objects.filter(email = email).first()
    if user is None:
        raise Exception_avec_var(1003,email)
    return user
def verifie_inexistant_mail_user(email):
    """Raise Exception_avec_var(1001, email) when a user already uses this e-mail."""
    if Utilisateur.objects.filter(email = email).first() is not None:
        raise Exception_avec_var(1001, email)
def get_verifie_evenement(id_evenement):
    """Return the Evenement with this id; raise Exception_participant(1005, id)
    when no such event exists."""
    evenement = Evenement.objects.filter(id =id_evenement).first()
    if evenement is None:
        raise Exception_participant(1005,id_evenement)
    return evenement
def get_verifie_geolocalisation(id_user):
    """Return the user's current position as {'latitude', 'longitude'};
    raise Exception_participant(1007, id_user) when geolocation is disabled."""
    user = get_verifie_identifiant_user(id_user)
    if not user.geoloc_active:
        # NOTE(review): lat/long may still be None even when geoloc_active is
        # set — the original left that check commented out
        # ("bien penser a checker les is None"); confirm before relying on it.
        raise Exception_participant(1007, id_user)
    return {'latitude': user.position_actuelle_lat,
            'longitude': user.position_actuelle_long}
|
# prompt user with series of inputs for Mad Lib fill ins - example, a singular noun, an adjective, etc.
# place that data in pre made story template
print("Lets Mad Lib!!!")
adjetive1 = input("Give me an adjetive >")
adjetive2 = input("Another adjetive please >")
adjetive3 = input("Another adjetive >")
plural_noun1 = input("Give me a plural noun")
verb1 = input("Now a verb >")
plural_noun_animal1 = input("Now a plural noun or animal >")
plural_noun_animal2 = input("Another plural noun or animal >")
verb2 = input("A verb >")
noun_food1 = input("Now give me a noun or food >")
noun_food2 = input("Another noun or food >")
verb3 = input("A verb >")
plural_noun2 = input("A plural noun >")
adjetive4 = input("Yet again, another adjetive >")
noun_animal1 = input("Now a noun or animal >")
noun_food3 = input("Give me another noun or food >")
noun_food4 = input("A noun or food >")
print("The rainforest is a {} and {} place with a variety of {} {} who live there. Some animals {} high in the trees, like {} and {}. These animals {} foods like {} and {}. Other animals {} on the forest floor, like {} and the {} {}, eating {} and {} to survive.".format(adjetive1, adjetive2, adjetive3, plural_noun1, verb1, plural_noun_animal1, plural_noun_animal2, verb2, noun_food1, noun_food2,verb3, plural_noun2, adjetive4, noun_animal1, noun_food3, noun_food4)) |
from django.conf.urls import include, url
from forum_messages.views import AorMessageView, AorConversationView, \
AorReplyView, AorWriteView
# Merge the app's own messaging views with django-postman's default URLs.
# The catch-all include must stay LAST so the custom routes take precedence.
merged_patterns = [
    url(r'^reply/(?P<message_id>[\d]+)/$', AorReplyView.as_view(), name='reply'),
    url(r'^view/(?P<message_id>[\d]+)/$', AorMessageView.as_view(), name='view'),
    url(r'^view/t/(?P<thread_id>[\d]+)/$', AorConversationView.as_view(), name='view_conversation'),
    url(r'^write/(?:(?P<recipients>[^/#]+)/)?$', AorWriteView.as_view(), name='write'),
    url(r'^', include('postman.urls')),
]
# Everything is exposed under the 'postman' namespace so existing reverse()
# lookups against postman keep resolving to the overridden views.
urlpatterns = [
    url(r'^', include(merged_patterns, namespace='postman', app_name='postman')),
]
|
#!/usr/bin/env python3
# ANSI SGR escape sequences: switch the terminal foreground to red, then reset.
red = '\033[0;31m'
reset = '\033[0m'
print(f"{red}what is your name{reset}")
name = input('> ')
print(f"hi there {name}")
|
import logging
import numpy as np
import cv2 as cv
import triangulation
from numpy.core.numeric import Inf
from scipy.optimize import minimize
from numpy.linalg import pinv, norm
from math import acos, cos, pi, sin, sqrt
from numpy import dot
# Module-level state shared with the scipy objective callback (funcWrapper),
# which only receives the DoF vector and must reach these through globals.
ref = None          # current reference image
VpStar = None       # images in which the current patch is considered visible
depthVector = None  # unit vector from the ref optical centre toward the patch
def run(images) :
    """Entry point: scan every image's features for epipolar matches.

    The downstream patch construction / optimisation pipeline was disabled
    (commented out) in the original, so this currently returns an empty patch
    list and only exercises computeF, which also visualises each match.
    """
    global ref
    patches = []
    for image in images :
        # funcWrapper/decode reach the reference image through this global.
        ref = image
        for feat1 in image.features :
            # Compute features satisfying epipolar constraints.
            computeF(ref, images, feat1)
    return patches
def computeF(ref, images, feat1) :
    """Collect features from the other images lying near feat1's epipolar line.

    A candidate is accepted when its distance to the epipolar line induced by
    feat1 is at most 5 pixels; each accepted pair is also shown via dispEpiline.
    """
    logging.info(f'IMAGE {ref.id:02d}:Computing epipolar features.')
    matches = []
    coordinate = np.array([feat1.x, feat1.y, 1])
    logging.debug(f'Feature coordinate : {coordinate}')
    for img in images :
        if img.id == ref.id :
            continue
        fundamentalMatrix = computeFundamentalMatrix(ref, img)
        epiline = fundamentalMatrix @ coordinate
        logging.debug(f'Epiline : {epiline}')
        for feat2 in img.features :
            if computeDistance(feat2, epiline) <= 5 :
                matches.append(feat2)
                dispEpiline(feat1, feat2, ref, epiline)
    return matches
def sortF(ref, feat1, F) :
    """Sort candidate features ascending by the depth of their triangulated
    point from the reference camera, and return the (in-place sorted) list.

    Fix: the original computed an |d1 - d2| depth that was immediately
    overwritten by norm(vector1); that dead computation and the unused second
    viewing ray are removed — effective behaviour is unchanged.
    """
    logging.info(f'IMAGE {ref.id:02d}:Sorting epipolar features.')
    projectionMatrix1 = ref.projectionMatrix
    opticalCentre1 = ref.opticalCentre
    for feat2 in F :
        img = feat2.image
        # pt = triangulation.yasuVersion(feat1, feat2, projectionMatrix1, img.projectionMatrix)
        pt = triangulation.myVersion(feat1, feat2, projectionMatrix1, img.projectionMatrix)
        # Depth = distance from the reference optical centre to the 3-D point.
        feat2.depth = norm(pt - opticalCentre1)
    return insertionSort(F)
def computePatch(feat1, feat2, ref) :
    """Triangulate a matched feature pair into an oriented surface patch.

    The patch centre is the triangulated 3-D point; the normal is initialised
    pointing back toward the reference camera's optical centre (unit length).
    """
    logging.info(f'IMAGE {ref.id:02d}:Constructing patch.')
    img = feat2.image
    opticalCentre = ref.opticalCentre
    projectionMatrix1 = ref.projectionMatrix
    projectionMatrix2 = img.projectionMatrix
    centre = triangulation.myVersion(feat1, feat2, projectionMatrix1, projectionMatrix2)
    normal = opticalCentre - centre
    normal /= norm(normal)
    # NOTE(review): Patch and getPatchAxes are not defined or imported in this
    # module — confirm where they come from before enabling this code path.
    patch = Patch(centre, normal, ref)
    # Compute x and y vectors lying on patch
    px, py = getPatchAxes.yasuVersion(ref, patch)
    patch.px = px
    patch.py = py
    return patch
def computeVp(ref, images, minAngle) :
    """Images whose optical axis is within *minAngle* degrees of the reference's.

    The reference itself is always included. The angle test is the dot product
    of the third projection-matrix rows; NOTE(review): this assumes those rows
    are unit length — confirm the projection matrices are normalised.
    """
    logging.info(f'IMAGE {ref.id:02d}:Computing Vp.')
    threshold = cos(minAngle * pi / 180)
    axis1 = np.array([ref.projectionMatrix[2][k] for k in range(3)])
    visible = [ref]
    for img in images :
        if img.id == ref.id :
            continue
        axis2 = np.array([img.projectionMatrix[2][k] for k in range(3)])
        if dot(axis1, axis2) >= threshold :
            visible.append(img)
    logging.info(f'IMAGE {ref.id:02d}:Vp Size = {len(visible)}.')
    return visible
def computeVpStar(ref, patch, Vp, alpha):
    """Filter Vp down to images whose photometric discrepancy 1-NCC < alpha."""
    logging.info(f'IMAGE {ref.id:02d}:Computing VpStar.')
    accepted = []
    for img in Vp :
        if img.id == ref.id :
            continue
        discrepancy = 1 - ncc(ref, img, patch)
        if discrepancy < alpha :
            accepted.append(img)
    logging.info(f'IMAGE {ref.id:02d}:Vp Star Size = {len(accepted)}.')
    return accepted
def computeDistance(feature, epiline) :
    """Perpendicular distance from a feature point to the line ax + by + c = 0,
    where epiline = (a, b, c)."""
    a, b, c = epiline[0], epiline[1], epiline[2]
    numerator = abs(a * feature.x + b * feature.y + c)
    return numerator / sqrt(a * a + b * b)
def computeFundamentalMatrix(ref, img) :
    """Fundamental matrix mapping points in *ref* to epipolar lines in *img*.

    F = [e]_x P2 P1^+, where e is the epipole (ref's optical centre projected
    into img); F is normalised so its bottom-right entry equals 1.
    """
    epipole = img.projectionMatrix @ ref.opticalCentre
    crossMatrix = np.array([
        [0, -epipole[2], epipole[1]],
        [epipole[2], 0, -epipole[0]],
        [-epipole[1], epipole[0], 0]
    ])
    fundamentalMatrix = crossMatrix @ img.projectionMatrix @ pinv(ref.projectionMatrix)
    fundamentalMatrix = fundamentalMatrix / fundamentalMatrix[-1, -1]
    logging.debug(f'Fundamental Matrix : {fundamentalMatrix}')
    return fundamentalMatrix
def refinePatch(ref, patch) :
    """Optimise the patch geometry (depth, yaw, pitch) by minimising the mean
    photometric discrepancy g* with derivative-free Nelder-Mead."""
    initial = encode(ref, patch)
    print("Optimizing*")
    solution = minimize(fun=funcWrapper, x0=initial, method='Nelder-Mead', options={'maxfev': 1000})
    return decode(solution.x)
def registerPatch(patch, VpStar) :
    """Register *patch* into the grid cell it projects to, in every visible image.

    Bug fix: the original called int(pt/2) on the whole homogeneous vector,
    which raises TypeError on a numpy array; the pixel coordinates pt[0] and
    pt[1] were clearly intended.
    """
    for img in VpStar :
        pmat = img.projectionMatrix
        pt = pmat @ patch.centre
        pt /= pt[2]  # normalise homogeneous coordinates
        # /2 preserves the original's cell scaling — presumably 2x2-pixel cells;
        # TODO confirm the intended cell size.
        x = int(pt[0] / 2)
        y = int(pt[1] / 2)
        img.cells[x][y].patch = patch
def funcWrapper(DoF) :
    """scipy.optimize objective: decode the DoF vector into a patch and score
    it with g*, reading the reference image and V*(p) from module globals."""
    return computeGStar(ref, VpStar, decode(DoF))
def computeGStar(ref, VpStar, patch) :
    """Mean photometric discrepancy (1 - NCC) of *patch* over V*(p), excluding
    the reference image itself."""
    print("*", end="")  # progress tick, one per objective evaluation
    total = 0
    for img in VpStar :
        if img.id != ref.id :
            total += 1 - ncc(ref, img, patch)
    # NOTE(review): assumes ref is in VpStar and |VpStar| > 1, otherwise this
    # divides by zero — confirm callers guarantee that.
    return total / (len(VpStar) - 1)
def decode(DoF) :
    """Rebuild a Patch from its optimisation DoF (depth, yaw alpha, pitch beta).

    Bug fix: the original reassigned the module-global unit depth vector
    (depthVector = depthVector * depthUnit), so every objective evaluation
    during minimize() compounded the scaling. The scaled offset is now local;
    the global keeps the unit direction stored by encode().
    """
    depthUnit = DoF[0]
    alpha = DoF[1]
    beta = DoF[2]
    # Spherical (yaw, pitch) -> cartesian unit normal.
    x = cos(alpha) * sin(beta)
    y = sin(alpha) * sin(beta)
    z = cos(beta)
    centre = ref.opticalCentre + depthVector * depthUnit
    normal = np.array([x, y, z, 0])
    patch = Patch(centre, normal, None)
    px, py = getPatchAxes.yasuVersion(ref, patch)
    patch.px = px
    patch.py = py
    return patch
def encode(ref, patch) :
    """Project a patch into its optimisation DoF: (depth, yaw alpha, pitch beta).

    Side effect: stores the unit ref->centre direction in the module-global
    depthVector, which decode() later consumes.
    """
    global depthVector
    offset = ref.opticalCentre - patch.centre
    depthUnit = norm(offset)
    depthVector = offset / depthUnit
    nx, ny, nz = patch.normal[0], patch.normal[1], patch.normal[2]
    alpha = acos(nx / sqrt(nx ** 2 + ny ** 2))            # yaw
    beta = acos(nz / sqrt(nx ** 2 + ny ** 2 + nz ** 2))   # pitch
    return depthUnit, alpha, beta
def dispEpiline(feat1, feat2, ref, epiline) :
    """Debug visualisation: show the reference feature and its epipolar line in
    the sensed image. Blocks until a key is pressed, then closes the windows.
    """
    ref2 = cv.imread(ref.name)
    cv.circle(ref2, (int(feat1.x), int(feat1.y)), 4, (0, 255, 0), -1)
    cv.imshow(f'Reference Image ID : {ref.id}', ref2)
    img = feat2.image.computeFeatureMap()
    # Intersections of ax + by + c = 0 with the top (y=0) and bottom (y=480)
    # image rows; assumes a 480-pixel-high image — TODO confirm.
    epiline_x = (int(-epiline[2] / epiline[0]), 0)
    epiline_y = (int((-epiline[2] - (epiline[1]*480)) / epiline[0]), 480)
    cv.line(img, epiline_x, epiline_y, (255, 0, 0), 1)
    cv.circle(img, (int(feat2.x), int(feat2.y)), 3, (0, 255, 0), -1)
    cv.imshow(f'Reference Image ID : {ref.id}', ref2)
    cv.imshow(f'Sensed Image ID : {feat2.image.id}', img)
    cv.waitKey(0)
    cv.destroyAllWindows()
def insertionSort(A) :
    """Sort *A* in place by ascending .depth and return the same list.

    Replaces the hand-rolled insertion sort with list.sort, which is likewise
    stable and in place — callers rely on getting the same list object back.
    """
    A.sort(key=lambda element: element.depth)
    return A
def ncc(ref, img, patch) :
    """Photometric agreement of *patch* between the reference and another image.

    Projects the patch's 5x5 sample grid into both images, samples each with
    bilinear interpolation, and returns the normalised cross-correlation.
    """
    grid1 = projectGrid(patch, ref.projectionMatrix)
    grid2 = projectGrid(patch, img.projectionMatrix)
    samples1 = bilinearInterpolationModule(ref, grid1)
    samples2 = bilinearInterpolationModule(img, grid2)
    return computeNCC(samples1, samples2)
def projectGrid(patch, pmat) :
    """Project the patch's 5x5 sample grid into an image.

    The grid is centred on the projected patch centre and stepped along the
    projections of the patch's px/py axes, spanning +/- 2.5 steps.
    Returns a (5, 5, 3) array of homogeneous pixel coordinates.
    """
    margin = 2.5
    centre = pmat @ patch.centre
    centre = centre / centre[2]
    stepX = pmat @ (patch.centre + patch.px)
    stepX = stepX / stepX[2] - centre
    stepY = pmat @ (patch.centre + patch.py)
    stepY = stepY / stepY[2] - centre
    origin = centre - margin * stepX - margin * stepY
    grid = np.empty((5, 5, 3))
    for row in range(5) :
        for col in range(5) :
            grid[row][col] = origin + row * stepY + col * stepX
    return grid
def bilinearInterpolationModule(img, grid) :
    """Sample *img* at each (possibly fractional) grid coordinate with bilinear
    interpolation; out-of-frame samples become black.

    Returns a (5, 5, 3) array of interpolated pixel values.
    """
    gridVal = np.empty((5, 5, 3))
    for i in range(grid.shape[0]) :
        for j in range(grid.shape[1]) :
            x = grid[i][j][0]
            y = grid[i][j][1]
            # Hard-coded 640x480 frame bounds — TODO confirm against the real
            # image size; also note x2/y2 = int+1 can still read one past the edge.
            if (int(x) < 0 or int(y) < 0 or int(x) > 640 or int(y) > 480) :
                gridVal[i][j] = np.array([0, 0, 0])
            else :
                # gridVal[i][j] = getPixel.jiaChenVersion((int(x), int(y)), img)
                # NOTE(review): getPixel is not imported in this module — confirm
                # where it is supposed to come from.
                x1 = int(grid[i][j][0])
                x2 = int(grid[i][j][0]) + 1
                y1 = int(grid[i][j][1])
                y2 = int(grid[i][j][1]) + 1
                q11 = getPixel.jiaChenVersion((x1, y1), img)
                q12 = getPixel.jiaChenVersion((x1, y2), img)
                q21 = getPixel.jiaChenVersion((x2, y1), img)
                q22 = getPixel.jiaChenVersion((x2, y2), img)
                gridVal[i][j] = computeBilinearInterpolation(x, y, x1, x2, y1, y2, q11, q12, q21, q22)
    return gridVal
def computeBilinearInterpolation(x, y, x1, x2, y1, y2, q11, q12, q21, q22) :
    """Standard bilinear blend of four corner samples at fractional (x, y)."""
    t = (x - x1) / (x2 - x1)  # horizontal fraction within the cell
    u = (y - y1) / (y2 - y1)  # vertical fraction within the cell
    top = q11 * (1 - t) + q21 * t        # interpolate along the y1 edge
    bottom = q12 * (1 - t) + q22 * t     # interpolate along the y2 edge
    return top * (1 - u) + bottom * u
def computeNCC(gridVal1, gridVal2) :
    """Normalised cross-correlation between two equally-shaped sample grids.

    Improvements over the original triple-nested loops: vectorised with numpy,
    and the sample count comes from the grid itself instead of the hard-coded
    75 (= 5*5*3), so any equally-shaped grids work. Returns 0 when either grid
    has zero variance (same guard as the original).
    """
    g1 = np.asarray(gridVal1, dtype=float)
    g2 = np.asarray(gridVal2, dtype=float)
    diff1 = g1 - g1.mean()
    diff2 = g2 - g2.mean()
    product = float(np.sum(diff1 * diff2))
    stds = float(np.sum(diff1 ** 2)) * float(np.sum(diff2 ** 2))
    if stds == 0 :
        return 0
    return product / sqrt(stds)
from unittest import TestCase
from poe.config import settings
from poe.web_api.session import (
PathSession, InvalidLoginException
)
class PathSessionTestCase(TestCase):
    """Integration tests for PathSession login and stash retrieval."""

    @classmethod
    def setUpClass(cls):
        # One shared, logged-in session for the whole test class.
        cls.session = PathSession(settings["USERNAME"], settings["PASSWORD"])

    def test_good_login(self):
        # setUpClass already raises if the configured credentials fail.
        pass

    def test_bad_login(self):
        self.assertRaises(InvalidLoginException, PathSession,
                          username="bad", password="login")

    def test_get_stash_good(self):
        self.session.get_stash(league="nemesis")

    def test_get_stash_bad(self):
        # Fix: assertEquals is a deprecated alias (removed in Python 3.12).
        # NOTE(review): this calls get_stash_tab while the happy path above
        # calls get_stash — confirm which API is intended.
        self.assertEqual(self.session.get_stash_tab(league="bad_league"), None)
|
from database.db import db
class AgeData(db.Model):
    """
    Stores age bands, their data and the relativity.

    Columns:
        id          -- surrogate primary key (autoincrement).
        data        -- integer payload for the band.
        lower_limit -- lower bound of the age band (inclusivity unspecified — TODO confirm).
        upper_limit -- upper bound of the age band.
        relativity  -- float weighting applied to this band.
    """
    id = db.Column(db.Integer, autoincrement=True,
                   primary_key=True, nullable=False)
    data = db.Column(db.Integer, nullable=False)
    lower_limit = db.Column(db.Integer, nullable=False)
    upper_limit = db.Column(db.Integer, nullable=False)
    relativity = db.Column(db.Float, nullable=False)

    def __init__(self, data, lower_limit, upper_limit, relativity):
        self.data = data
        self.lower_limit = lower_limit
        self.upper_limit = upper_limit
        self.relativity = relativity

    def save(self):
        """Insert this row and commit immediately."""
        db.session.add(self)
        db.session.commit()

    def update(self, data):
        """Apply a dict of column -> value changes to this row and commit."""
        for key, item in data.items():
            setattr(self, key, item)
        db.session.commit()

    def delete(self):
        """Delete this row and commit."""
        db.session.delete(self)
        db.session.commit()
|
import ble2lsl as bl
from ble2lsl.devices import muse2016, ganglion
from pylsl import StreamInlet, resolve_byprop, StreamOutlet #receiving the EEG signals
import time
import numpy as np
import bokeh
import pylsl as lsl
# this is the first revision of convert.py from Samuel White
# taking alot of insporation from the BCI-Workshop code
class Convert:
    """Wraps a pylsl EEG stream: resolves it, opens an inlet and reads channel
    metadata into self.ch_names."""

    def __init__(self, device, max_chunklen = 0 ,stream_type = 'EEG'):
        # NOTE(review): `device` and `max_chunklen` are accepted but never used;
        # get_Stream_Info hard-codes max_chunklen=12 — confirm intent.
        streams = resolve_byprop('type', stream_type , timeout=2)
        if len(streams) == 0:
            raise RuntimeError('no EEG stream found')
        inlet, timecorrect,info,descrition,sampling_f,num_channels = self.get_Stream_Info(streams)
        ## getting the names of the channels, not sure if this is needed
        ch = descrition.child('channels').first_child()
        self.ch_names = [ch.child_value('label')]
        for i in range(1, num_channels):
            ch = ch.next_sibling()
            self.ch_names.append(ch.child_value('label'))
        ## getting the buffer lengths, epoch lengths, overlap
        # NOTE(review): the three values below are plain locals and are lost
        # when __init__ returns — they were presumably meant to be attributes.
        buffer_len = 15 ## change to a user input (or from device)
        epoch_len = 1 #same as above comment
        overlap_len = 0.8 # this is also dependent of the device file

    def get_Stream_Info(self, streams):
        """Open an inlet on the first resolved stream and return
        (inlet, time_correction, info, description, sampling_rate, channel_count)."""
        inlet = StreamInlet(streams[0], max_chunklen=12, recover=False) ## create a getter to change the chuncklength based on the device
        timecorrect = inlet.time_correction() # gets the time correction of the two buffers
        info = inlet.info()
        descrition = info.desc()
        fs = int(info.nominal_srate())
        num_channels = info.channel_count()
        return inlet,timecorrect, info, descrition, fs, num_channels

    #def index_Channels(self):
        #index_channel = args.channels
# Basic list-operation demo (comments translated from Chinese).
a=[1,2,3,4,5,6,7]
b=["ab","cd","ef","gh","ij"]
c=[1.1,2.1,3.1,4.1,5.1,6.1,7.1]
d=[1.1,2.1,3.1,4.1,5.1,6.1,7.1]
e=["ab","cd","ef","gh","ij"]
print(a)  # print the whole list
print(b[0])  # print a single element
print(a[-1])  # print the last element
# append an element
a.append(8)
print(a)
print(a.append(7))  # NOTE: append mutates in place and returns None, so this prints None
# insert an element at an index
b.insert(2,"yy")
print(b)
# delete an element by index
del c[0]
print(c)
# pop an element by index
d.pop(1)
print(d)
# remove an element by value
e.remove("ij")
print(e)
f=[7,2,3,4,6,5,1]
g=[7,2,3,4,6,5,1]  # NOTE(review): g is never used afterwards
h=["ab","cd","ef","gh","ij"]
# print in reverse order (reverse() mutates the list)
h.reverse()
print(h)
# temporary sort: sorted() returns a new list, f itself stays unchanged
print(sorted(f))
# in-place sort (left disabled so the line above shows the difference)
#f.sort()
print(f)
# check the list length
print(len(h))
|
#!/usr/bin/python3
'''Package initialiser: lists the model modules and creates the shared
FileStorage instance, reloading any previously serialised objects.'''
from models.engine import file_storage

# Model modules re-exported for `from models import *`.
__all__ = ["base_model", "amenity", "city", "user", "state", "place", "review"]

# Single storage engine shared across the application.
storage = file_storage.FileStorage()
storage.reload()  # load persisted objects from the JSON file, if present
|
import logging
'''
Numeric logging levels for reference:
logging.DEBUG   -> 10
logging.INFO    -> 20
logging.WARNING -> 30
'''
# Minimal configuration (level only), kept for reference.
#logging.basicConfig(level=logging.DEBUG)
# Active configuration: DEBUG level with a timestamped message format.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
# File-based variant (append mode), kept for reference.
#logging.basicConfig(level=logging.DEBUG, filename='example.log', filemode ='a')
logging.debug('Ignored !')  # NOTE: with level=DEBUG this IS emitted despite its text
logging.info('This should be logged')
logging.warning('And this , too')
|
from web.controllers.api import route_api
from flask import request,jsonify
from application import app,db
import requests,json
from common.models.member.Member import Member
from common.libs.Helper import getCurrentDate
from common.libs.member.MemberService import MemberService
@route_api.route('/member/login',methods =['POST','GET'])
def login():
    """WeChat mini-program login: register the member on first login, then
    issue an auth token of the form "<auth code>#<member id>"."""
    resp = {'code': 200, 'msg': '操作成功', 'data': {}}  # response payload
    req = request.values  # parameters submitted by the client
    # MultiDict.get replaces the verbose "x if 'x' in req else default" pattern.
    nickname = req.get('nickName', '')
    sex = req.get('gender', 0)
    avatar = req.get('avatarUrl', '')
    code = req.get('code', '')
    if not code or len(code) < 1:
        resp['code'] = -1
        resp['msg'] = '需要code'
        return jsonify(resp)
    openid = MemberService.getWeChatOpenId(code)
    # Check whether this WeChat user is already registered.
    member_info = Member.query.filter_by(openid=openid).first()
    resp['msg'] = '已经注册'
    if not member_info:
        # First login: create the member record.
        model_member = Member()
        model_member.openid = openid
        model_member.nickname = nickname
        model_member.sex = sex
        model_member.avatar = avatar
        model_member.salt = MemberService.geneSalt()
        model_member.updated_time = model_member.created_time = getCurrentDate()
        db.session.add(model_member)
        db.session.commit()
        member_info = model_member
        resp['msg'] = '注册成功'
    # Re-fetch by id so the token is built from the persisted row.
    member_info = Member.query.filter_by(id=member_info.id).first()
    resp['code'] = 200
    token = "%s#%s" % (MemberService.geneAuthCode(member_info), member_info.id)
    resp['data'] = {'token': token}
    return jsonify(resp)
@route_api.route('/member/check-reg',methods =['POST','GET'])
def checkReg():
    """Check whether the WeChat user behind *code* is registered; if so,
    return a fresh auth token ("<auth code>#<member id>")."""
    resp = {'code': 200, 'msg': '操作成功', 'data': {}}  # response payload
    req = request.values  # parameters submitted by the client
    # MultiDict.get replaces the verbose "x if 'x' in req else default" pattern.
    code = req.get('code', '')
    if not code or len(code) < 1:
        resp['code'] = -1
        resp['msg'] = '需要code'
        return jsonify(resp)
    openid = MemberService.getWeChatOpenId(code)
    member_info = Member.query.filter_by(openid=openid).first()
    if not member_info:
        resp['code'] = -1
        resp['msg'] = '未注册'
        return jsonify(resp)
    token = "%s#%s" % (MemberService.geneAuthCode(member_info), member_info.id)
    resp['data'] = {'token': token}  # returned to the front end
    return jsonify(resp)
# Copyright 2021 Intel-KAUST-Microsoft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SwitchML Project
# @file docs_setup.py
# @brief This is a simple sphinx extension that brings in and prepares documents scattered across the repository
import os
from pathlib import Path
def setup(app):
    """Sphinx extension entry point: copy READMEs scattered across the repo
    into the docs tree and rewrite their hyperlinks for the docs context.

    Args:
        app: the Sphinx application object (required by the hook signature;
             otherwise unused).
    """
    input_path_prefix = "../"
    readmes_path_prefix = "readmes/"
    Path(readmes_path_prefix).mkdir(parents=True, exist_ok=True)
    # Read The Docs sets this environment variable on its builders.
    on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
    # All the readme files that we want to copy in the docs
    readme_mappings = {
        "README.md": "overview.md",
        "CONTRIBUTING.md": "contrib.md",
        "LICENSE": "license.md",
        "dev_root/client_lib/README.md": "client_lib.md",
        "dev_root/examples/README.md": "examples.md",
        "dev_root/benchmarks/README.md": "benchmarks.md",
        "dev_root/p4/README.md": "p4.md",
        "dev_root/controller/README.md": "controller.md",
        "dev_root/frameworks_integration/README.md": "frameworks_integration.md",
        "dev_root/frameworks_integration/pytorch_patch/README.md": "pytorch_patch.md",
        "dev_root/frameworks_integration/nccl_plugin/README.md": "nccl_plugin.md",
        "dev_root/scripts/README.md": "scripts.md"
    }
    # We might need different links for three different use cases:
    # 1. Browsing the repo on github: In this case we want the links to point to github folders and files.
    # 2. Browsing the documentation on read the docs: In this case we want the links to point the read the docs pages.
    # 3. Generating and browsing the documentation locally: In this case we want the links to point to the locally generated html pages.
    #
    # The readmes are by default written to address case 1. Therefore for the other 2 cases we need to provide a link mapping.
    hyperlink_mappings = {
        "/dev_root/p4": "p4",
        "/dev_root/controller": "controller",
        "/dev_root/client_lib": "client_lib",
        "/dev_root/examples": "examples",
        "/dev_root/benchmarks": "benchmarks",
        "/CONTRIBUTING.md": "contrib",
        "/LICENSE": "license",
        "/dev_root/scripts": "scripts",
        "/dev_root/frameworks_integration": "frameworks_integration",
        "/dev_root/frameworks_integration/pytorch_patch": "pytorch_patch",
        "/dev_root/frameworks_integration/nccl_plugin": "nccl_plugin",
        "/docs/img/benchmark.png": "../../../../img/benchmark.png"
    }
    if on_rtd:
        # Update any links particular to RTD here.
        hyperlink_mappings["/docs/img/benchmark.png"] = "https://raw.githubusercontent.com/OasisArtisan/p4app-switchML/main/docs/img/benchmark.png"
    print("Copying readme files from the repository and preparing them for RTD.")
    for infile, outfile in readme_mappings.items():
        with open(input_path_prefix + infile, "r") as f:
            outtext = f.read()  # read() replaces the needless "".join(f.readlines())
        # Loop-invariant: decide the rewrite strategy once per file.
        is_markdown = infile.endswith(".md")
        for original, replacement in hyperlink_mappings.items():
            if is_markdown:
                # Only rewrite well-formed markdown link targets "](...)".
                # Regex might be better. But this is good enough for now.
                outtext = outtext.replace("]({})".format(original), "]({})".format(replacement))
            else:
                # Non-markdown files (e.g. LICENSE): plain textual replacement.
                outtext = outtext.replace(original, replacement)
        with open(readmes_path_prefix + outfile, "w") as f:
            f.write(outtext)
|
import couchdb
# Module-wide connection to the local CouchDB server (default http://localhost:5984/).
couch = couchdb.Server()
def iscodeTaken(code):
    """Return True when some course document already uses this code."""
    db = couch["courses"]
    return any(db[courseid]['code'] == code for courseid in db)
def suggestCode(n):
    """Suggest a free course code: one more than the largest code seen
    (but at least n+1), retrying recursively if that code is somehow taken."""
    db = couch["courses"]
    highest = n
    for courseid in db:
        highest = max(highest, db[courseid]["code"])
    candidate = highest + 1
    if iscodeTaken(candidate):
        return suggestCode(candidate)
    return candidate
#print suggestCode(1)
def doesexsistbyname(name):
    """Return True when a course with this exact name already exists."""
    db = couch["courses"]
    return any(db[courseid]["name"] == name for courseid in db)
def doesexsistbycode(code):
    """Return True when a course with this code already exists."""
    db = couch["courses"]
    return any(db[courseid]["code"] == code for courseid in db)
def doesexsist(name,code):
    """Return True when a course with this name OR this code already exists.

    Python 2 module (print statements). Prints guidance to the console as a
    side effect, including a suggested free code when the code is taken.
    """
    if doesexsistbyname(name):
        print "Name taken"
        return True
    if doesexsistbycode(code): #if either are true then it does exsist
        print "Code taken"
        print "Suggested Code: " + str(suggestCode(code))
        return True
    print 'Class does not exsist. Yet...'
    return False
#doesexsist("Freshman Comp",999)
#doesexsist("Freshman",1)
#doesexsist("Freshman",999)
#create a course
def newCourse(name,code):
    """Create a course document with an empty sections list, unless the name
    or code is already taken (Python 2 module: print statements)."""
    if doesexsist(name,code):
        print "Course with same name or code already exsists"
    else:
        db = couch["courses"]
        course = {
            'name': name,
            'code':code,
            'sections':[]
        }
        db.save(course)
        print "Course " + name + " created"
#newCourse("Freshman Comp",1)
def userNewCourse():
    """Interactively prompt for a course name and code, then create it.

    Python 2: raw_input returns a string; input() evaluates the typed
    expression (so the code is expected to be numeric).
    """
    name = raw_input("Course Name: ")
    code = input("Code: ")
    newCourse(name,code)
def doesSectionExsist(code,sectioncode):
    """Return True when ANY course has a section with this codename.

    NOTE(review): the *code* argument is accepted but never used — the search
    spans every course, not just the one with the given code.
    """
    db = couch["courses"]
    for courseid in db:
        if any(section["section"] == sectioncode for section in db[courseid]["sections"]):
            return True
    return False
#print doesSectionExsist(1,'x')
def newSection(code,sectioncode,days,period,room,double,capacity,numRegistered,sRegistered,teacher):
    """Append a section document to the course with the given code.

    Returns True on success; implicitly returns None (falsy) when the section
    code is already used or no course matches — callers treat None as failure.
    """
    if doesSectionExsist(code,sectioncode):
        print "Section code already used. Check database to decide new section code."
    else:
        # NOTE(review): this local dict shadows the function's own name.
        newSection = {
            'section':sectioncode,
            'teacher':teacher,
            'days':days,
            'period':period,
            'room':room,
            'double':double,
            'capacity':capacity,
            'numRegistered':numRegistered,
            'sRegistered':sRegistered
        }
        db = couch["courses"]
        #find course
        for courseid in db:
            course = db[courseid]
            if course["code"] == code:
                course["sections"].append(newSection)
                db.save(course)
                return True
def str2bool(v):
    """Map common affirmative strings (case-insensitive) to True, anything else to False."""
    affirmative = ("yes", "true", "t", "1")
    return v.lower() in affirmative
def userNewSection():
    """Interactively gather all section fields and create the section.

    Python 2: raw_input returns strings; input() evaluates the typed
    expression (numeric fields). Returns True on success, False otherwise.
    """
    code = input("Course code: ")
    sectioncode = raw_input("Section codename: ")
    days = raw_input("Days (i.e. MTWRF): ")
    period = input("Period: ")
    room = raw_input("Room : ")
    double = str2bool(raw_input("Is a double period (t/f)? "))
    capacity = input("Class capacity: ")
    numRegistered = input("Number of registered students: ")
    sRegistered = []
    # Collect one student id per registered student.
    for i in range(0, numRegistered):
        sRegistered.append(input("studentid: "))
    teacher = raw_input("Teacher name: ")
    if newSection(code,sectioncode,days,period,room,double,capacity,numRegistered,sRegistered,teacher):
        print "section added"
        return True
    else:
        return False
#userNewSection()
|
from vision.ssd.vgg_ssd import create_vgg_ssd, create_vgg_ssd_predictor
from vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd, create_mobilenetv1_ssd_predictor
from vision.ssd.mobilenetv1_ssd_lite import create_mobilenetv1_ssd_lite, create_mobilenetv1_ssd_lite_predictor
from vision.ssd.squeezenet_ssd_lite import create_squeezenet_ssd_lite, create_squeezenet_ssd_lite_predictor
from vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite, create_mobilenetv2_ssd_lite_predictor
from vision.utils.misc import Timer
import cv2
import sys
import torch
import os
def _find_classes(dir):
if sys.version_info >= (3, 5):
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
else:
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def sortBoxes(Boxes, labels):
    """Sort detection boxes into reading order and return (boxes, labels) tensors.

    When the vertical spread exceeds the height of the first box, detections
    are split into two text lines at the mid y-coordinate and each line is
    sorted left-to-right; otherwise everything is sorted left-to-right.

    Bug fix: the two-line branch iterated the module-level `boxes` variable
    instead of the `Boxes` parameter, so the function only worked by accident
    when a global of that name happened to exist.
    """
    line1, line2 = [], []
    labelsline1, labelsline2 = [], []
    LabelsSort = []
    maxBoxes = torch.max(Boxes, 0)
    minBoxes = torch.min(Boxes, 0)
    if maxBoxes.values[1] > (Boxes[0][3].item() - Boxes[0][1].item()):
        # Two lines: split at the midpoint of the min/max top-y coordinates.
        thresY = (maxBoxes.values[1] + minBoxes.values[1]) / 2
        for i in range(Boxes.size(0)):
            box = Boxes[i, :].numpy()
            if box[1] < thresY.item():
                line1.append(box)
                labelsline1.append(labels.numpy()[i])
            else:
                line2.append(box)
                labelsline2.append(labels.numpy()[i])
        # Sort each line left-to-right, and reorder labels with the same permutation.
        sortline1 = sorted(line1, key=lambda k: k[0])
        sortline2 = sorted(line2, key=lambda k: k[0])
        indexaftersortl1 = [i[0] for i in sorted(enumerate(line1), key=lambda x: x[1][0])]
        indexaftersortl2 = [i[0] for i in sorted(enumerate(line2), key=lambda x: x[1][0])]
        BoxesSort = sortline1 + sortline2
        for i in indexaftersortl1:
            LabelsSort.append(labelsline1[i])
        for i in indexaftersortl2:
            LabelsSort.append(labelsline2[i])
    else:
        # Single line: plain left-to-right sort.
        BoxesSort = sorted(Boxes.numpy(), key=lambda k: k[0])
        indexaftersortl = [i[0] for i in sorted(enumerate(Boxes.numpy()), key=lambda x: x[1][0])]
        for i in indexaftersortl:
            LabelsSort.append(labels.numpy()[i])
    print("LabelsSort", LabelsSort)
    return torch.tensor(BoxesSort), torch.tensor(LabelsSort)
# ---- CLI parsing and network construction ----
if len(sys.argv) < 5:
    print('Usage: python run_ssd_example.py <net type> <model path> <label path> <image path>')
    sys.exit(0)
net_type = sys.argv[1]
model_path = sys.argv[2]
label_path = sys.argv[3]
# NOTE(review): parsed but never used — the loop below walks images/images_long instead.
image_path = sys.argv[4]
# One class name per line in the label file.
class_names = [name.strip() for name in open(label_path).readlines()]
if net_type == 'vgg16-ssd':
    net = create_vgg_ssd(len(class_names), is_test=True)
elif net_type == 'mb1-ssd':
    net = create_mobilenetv1_ssd(len(class_names), is_test=True)
elif net_type == 'mb1-ssd-lite':
    net = create_mobilenetv1_ssd_lite(len(class_names), is_test=True)
elif net_type == 'mb2-ssd-lite':
    net = create_mobilenetv2_ssd_lite(len(class_names), is_test=True)
elif net_type == 'sq-ssd-lite':
    net = create_squeezenet_ssd_lite(len(class_names), is_test=True)
else:
    print("The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.")
    sys.exit(1)
net.load(model_path)
# Wrap the network in the predictor matching the chosen architecture.
if net_type == 'vgg16-ssd':
    predictor = create_vgg_ssd_predictor(net, candidate_size=200)
elif net_type == 'mb1-ssd':
    predictor = create_mobilenetv1_ssd_predictor(net, candidate_size=200)
elif net_type == 'mb1-ssd-lite':
    predictor = create_mobilenetv1_ssd_lite_predictor(net, candidate_size=200)
elif net_type == 'mb2-ssd-lite':
    predictor = create_mobilenetv2_ssd_lite_predictor(net, candidate_size=200)
elif net_type == 'sq-ssd-lite':
    predictor = create_squeezenet_ssd_lite_predictor(net, candidate_size=200)
else:
    # Unreachable in practice: unknown net types exited above.
    predictor = create_vgg_ssd_predictor(net, candidate_size=200)
# ---- Inference loop: walk images/images_long/<class>/<file>, detect, annotate ----
classes, class_to_idx = _find_classes(os.path.join("images","images_long"))
for target in sorted(class_to_idx.keys()):
    d = os.path.join(os.path.join("images","images_long"),target)
    if not os.path.isdir(d):
        continue
    for folder in sorted(os.listdir(d)):
        # `folder` is actually a file name inside the class directory.
        orig_image = cv2.imread(d+"/"+folder)
        image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)
        # Up to 10 detections, score threshold 0.3.
        boxes, labels, probs = predictor.predict(image, 10, 0.3)
        boxes, labels=sortBoxes(boxes,labels)
        labels_dict=[]
        #print(boxes)
        for i in range(boxes.size(0)):
            box = boxes[i, :]
            #print(box)
            # Draw the box and its class name on the original (BGR) image.
            cv2.rectangle(orig_image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 1)
            #label = f"""{voc_dataset.class_names[labels[i]]}: {probs[i]:.2f}"""
            label = f"{class_names[labels[i]]}"
            labels_dict.append(label)
            print(label)
            cv2.putText(orig_image, label,
                        (int(box[0]) , int(box[1]) + 5),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, # font scale
                        (0, 0, 255),
                        1) # line type
        print(labels_dict)
        print(f"Found {len(probs)} objects")
        # Show the annotated frame and wait for a keypress before continuing.
        cv2.imshow("image",orig_image)
        cv2.waitKey(0)
|
#!/usr/bin/env python3
import extract_audio_wav as eaw
from preprocess_shrek2 import *
# Build a shell script of ffmpeg commands, one per Shrek subtitle line,
# each backgrounded and then awaited so they run strictly in sequence.
s2 = load_only_shrek_from_shrek_2_srt()
s2_lines = [
    str(eaw.Ffmpeg_Command(subtitle=sub, mov_path="Shrek_2.wav"))
    for sub in s2
]
# "&" backgrounds the command; "wait $!" blocks until that command finishes.
lines = [line + " &\nwait $!\n" for line in s2_lines]
# NOTE(review): append mode ('a+') means re-running keeps appending duplicates.
with open('gen_wav_synchronous.sh', 'a+') as f:
    for line in lines:
        f.write(line)
|
from django.shortcuts import render,HttpResponse,redirect
# Create your views here.
# Views for displaying student information
from django.urls import reverse
from students.models import Student
# Student list page
def student_list(request):
    """Render students/student.html with every Student (context via locals())."""
    student_list = Student.objects.all()
    return render(request,'students/student.html',locals())
    # return redirect(reverse("students:index"))
# Student detail page
def stu_message(request,id):
    """Render one student's detail page; *id* is the Student primary key.

    NOTE(review): Student.objects.get raises DoesNotExist for an unknown id —
    consider get_object_or_404 if a 404 is the desired behaviour.
    """
    # return HttpResponse("你真帅")
    student = Student.objects.get(id=id)
    return render(request,'students/stu_message.html',locals())
# View used for the URL reverse-resolution demo
def index(request):
    """Render the students index page."""
    return render(request,'students/index.html')
|
import smtplib
from datetime import date
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from dao.borrow_dao import BorrowDAO
def send_reminder(password, book_recipient_list):
    """Email overdue-book reminders via Gmail SMTP.

    book_recipient_list yields (borrow_id, recipient_email, recipient_name,
    book_title, due_date) tuples. Borrowings more than 15 days late get a
    "flagged lost" notice (CC to the librarian) and are marked lost in the DB;
    the rest get a plain reminder.

    Returns 'no connection', 'wrong password', or 'done'.

    Cleanups: removed the unused full_recipient_list local and hoisted the
    loop-invariant (and previously duplicated) cc address out of the loop.
    """
    # order:
    # start conn, ehlo, start tls, log in, make message object, send it, close connection
    try:
        conn = smtplib.SMTP('smtp.gmail.com', 587)  # email server and the port, initialise a connection with the server
    except Exception:
        # not the best idea but smtplib raises a whole lot of exceptions that cannot be caught by the try-except clause
        return 'no connection'
    sender_email = 'nischal.poudel@claremontseniorschool.co.uk'
    conn.ehlo()  # identify ourselves to the server, saying hi
    conn.starttls()  # initializes a secure connection with the server
    try:
        conn.login(sender_email, password)
    except smtplib.SMTPAuthenticationError:  # raised when the password is wrong
        conn.close()
        return 'wrong password'
    cc = 'rpl.psycho@gmail.com'  # librarian copy for lost-book notices
    for borrow_id, recipient_email, recipient_name, book_title, due_date in book_recipient_list:
        full_recipient_email = recipient_email + '@claremontseniorschool.co.uk'
        borrowing_late_for_days = get_borrowing_late_for_days(due_date)
        if borrowing_late_for_days > 15:
            msg = MIMEMultipart()
            msg['From'] = sender_email
            msg['To'] = full_recipient_email
            msg['Cc'] = cc
            msg['Subject'] = 'Book Borrowing Flagged Lost'
            due_date = str(due_date)
            body = ('Dear {0},\n\n'
                    'You have failed to return the book "{1}" by the due date ({2}). '
                    'It has been 15 days or more since the due date. So, as per the library policy, the book is '
                    'now considered lost, and you will be able to borrow one less book than you previously could.\n\n'
                    'If you can still return the book in a suitable condition, you may not be fined. '
                    'But that is a decision held by the staff overlooking the library finance '
                    'from the school finance department.\n\n'
                    'Regards,\n'
                    'Claremont Library Management').format(recipient_name, book_title, due_date)
            msg.attach(MIMEText(body, 'plain'))
            # attaching body with the rest of the email object, plain as it's plain text rather than html or xml
            final_email = msg.as_string()
            conn.sendmail(sender_email, [full_recipient_email, cc], final_email)
            BorrowDAO.set_flag_lost(borrow_id)
        else:
            msg = MIMEMultipart()
            msg['From'] = sender_email
            msg['To'] = full_recipient_email
            msg['Subject'] = 'Book Borrowing Crossed Due Date'
            body = ('Dear {0},\n\n'
                    'Your book borrowing for "{1}" has crossed its due date ({2}). Please return the book promptly.\n\n'
                    'Regards,\n'
                    'Claremont Library Management').format(recipient_name, book_title, due_date)
            msg.attach(MIMEText(body, 'plain'))
            # attaching body with the rest of the email object, plain as it's plain text rather than html or xml
            text = msg.as_string()
            conn.sendmail(sender_email, full_recipient_email, text)
    conn.close()  # close the connection
    return 'done'
def get_borrowing_late_for_days(due_date):
    """Days elapsed since *due_date* (positive when overdue, negative before it)."""
    return (date.today() - due_date).days
|
# Dictionary-operation demo.
dict1 = {
    'Name' : 'ROHIT',
    'Gender' : 'Male',
    'Age' : 28,
    'Education' : 'B.tech',
    'Nationality': 'Indian',
    'DOB' : '23-5-1995',
    'Religion' : 'Hindu'
}
print("----------------------")
print(dict1.get('Age')) # printing particular items using get.
# Loop 01 for getting keys using keys function
for e in dict1.keys():
    print(e)
print("----------------------")
# Loop 02 for getting values using values function
for e in dict1.values():
    print(e)
print("----------------------")
# Loop 03 for getting b/o keys and values using items function
# (each e is a (key, value) tuple, so e[0] is the key and e[1] the value)
for e in dict1.items():
    print(e,e[0],e[1])
print("----------------------")
# Loop 04 for getting items in another way (tuple unpacking)
for x,y in dict1.items():
    print(x,":",y)
print("----------------------")
a = dict1.pop("DOB") # to erase a item. It returns the removed item.
print(a)
print("----------------------")
del dict1['Age'] # to del a item. It will not return anything
print(dict1)
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import numpy as np
import pva
from tensorflow.python.ipu import test_utils as tu
from tensorflow.compiler.tests import xla_test
from tensorflow.python.eager import def_function
from tensorflow.python.platform import googletest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ipu.config import IPUConfig
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import gradient_descent
class IpuXlaVariableTest(xla_test.XLATestCase):
  """Tests for resource-variable behaviour on the IPU XLA device.

  Covers initialization (constant, random-normal, truncated-normal,
  uniform), read/assign, gradient-descent training, device residency of
  variables across executions, and constant-folding of variable reads
  inside jit-compiled functions.
  """

  def testInitializeSimpleVariables(self):
    """ResourceVariables with random-normal initial values land near 0."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with ops.device("/device:IPU:0"):
      with self.session() as sess:
        x = resource_variable_ops.ResourceVariable(random_ops.random_normal(
            [5, 5], stddev=0.1),
                                                   name="x")
        y = resource_variable_ops.ResourceVariable(random_ops.random_normal(
            [1], stddev=0.1),
                                                   name="y")
        sess.run(variables.global_variables_initializer())
        r1, r2 = sess.run([x, y])
        # atol=1.0 is deliberately loose: values are random with stddev 0.1,
        # this only checks initialization ran and produced small numbers.
        self.assertAllClose(r1, np.zeros([5, 5]), atol=1.0)
        self.assertAllClose(r2, [0.0], atol=1.0)

  def testInitializeSharedVariables(self):
    """get_variable with constant initializers yields the exact constants."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with ops.device("/device:IPU:0"):
      with self.session() as sess:
        with variable_scope.variable_scope("vs", use_resource=True):
          x = variable_scope.get_variable(
              "x",
              shape=[],
              dtype=np.float32,
              initializer=init_ops.constant_initializer(1))
          y = variable_scope.get_variable(
              "y",
              shape=[],
              dtype=np.float32,
              initializer=init_ops.constant_initializer(2))
        sess.run(variables.global_variables_initializer())
        r1, r2 = sess.run([x, y])
        self.assertAllClose(r1, 1)
        self.assertAllClose(r2, 2)

  def testRead(self):
    """read_value() returns the initialized value of a resource variable."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with ops.device("/device:IPU:0"):
      with self.session() as sess:
        with variable_scope.variable_scope("vs", use_resource=True):
          z = variable_scope.get_variable(
              "z",
              shape=[],
              dtype=np.float32,
              initializer=init_ops.constant_initializer(3))
        sess.run(variables.global_variables_initializer())
        r = sess.run(z.read_value())
        self.assertAllClose(r, 3)

  def testAssign(self):
    """assign and assign_add update the variable's stored value."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with ops.device("/device:IPU:0"):
      with self.session() as sess:
        with variable_scope.variable_scope("vs", use_resource=True):
          z = variable_scope.get_variable(
              "z",
              shape=[],
              dtype=np.float32,
              initializer=init_ops.constant_initializer(0))
        sess.run(variables.global_variables_initializer())
        sess.run(state_ops.assign(z, 2))
        r = sess.run(z)
        self.assertAllClose(r, 2)
        sess.run(state_ops.assign_add(z, 6))
        r = sess.run(z)
        self.assertAllClose(r, 8)

  def testGradientDescent(self):
    """One SGD step on sum(x @ w + b) applies the expected update."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("vs", use_resource=True):
          w = variable_scope.get_variable(
              "w",
              shape=[4, 2],
              dtype=np.float32,
              initializer=init_ops.constant_initializer(
                  np.array([[1, 2], [3, 4], [5, 6], [7, 8]],
                           dtype=np.float32)))
          b = variable_scope.get_variable(
              "b",
              shape=[2],
              dtype=np.float32,
              initializer=init_ops.constant_initializer(
                  np.array([2, 3], dtype=np.float32)))
        x = array_ops.placeholder(np.float32, shape=[1, 4])
        y = math_ops.matmul(x, w) + b
        loss = math_ops.reduce_sum(y)
        optimizer = gradient_descent.GradientDescentOptimizer(0.1)
        train = optimizer.minimize(loss)
        sess.run(variables.global_variables_initializer())
        sess.run(train, {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
        vw, vb = sess.run([w, b])
        # Expected values hand-computed: w -= 0.1 * x^T, b -= 0.1 * 1.
        self.assertAllClose(np.array(
            [[0.3, 1.3], [2.7, 3.7], [4.5, 5.5], [6.1, 7.1]],
            dtype=np.float32),
                            vw,
                            rtol=1e-4)
        self.assertAllClose(np.array([1.9, 2.9], dtype=np.float32),
                            vb,
                            rtol=1e-4)

  def testRepeatedGradientDescent(self):
    """Five SGD steps accumulate into the expected final weights."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("vs", use_resource=True):
          w = variable_scope.get_variable(
              "w",
              shape=[4, 2],
              dtype=np.float32,
              initializer=init_ops.constant_initializer(
                  np.array([[1, 2], [3, 4], [5, 6], [7, 8]],
                           dtype=np.float32)))
          b = variable_scope.get_variable(
              "b",
              shape=[2],
              dtype=np.float32,
              initializer=init_ops.constant_initializer(
                  np.array([2, 3], dtype=np.float32)))
        x = array_ops.placeholder(np.float32, shape=[1, 4])
        y = math_ops.matmul(x, w) + b
        loss = math_ops.reduce_sum(y)
        optimizer = gradient_descent.GradientDescentOptimizer(0.1)
        train = optimizer.minimize(loss)
        sess.run(variables.global_variables_initializer())
        sess.run(train, {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
        sess.run(train, {x: np.array([[1, 2, 3, 4]], dtype=np.float32)})
        sess.run(train, {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
        sess.run(train, {x: np.array([[1, 2, 3, 4]], dtype=np.float32)})
        sess.run(train, {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
        vw, vb = sess.run([w, b])
        self.assertAllClose(
            np.array([[-1.3, -0.3], [1.7, 2.7], [2.9, 3.9], [3.5, 4.5]],
                     dtype=np.float32),
            vw,
            rtol=1e-4)
        self.assertAllClose(np.array([1.5, 2.5], dtype=np.float32),
                            vb,
                            rtol=1e-4)

  def testMultipleUpdate(self):
    """Running the same assign_add op ten times increments ten times."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("vs", use_resource=True):
          z = variable_scope.get_variable(
              "z",
              shape=[],
              dtype=np.float32,
              initializer=init_ops.constant_initializer(0))
        updater = state_ops.assign_add(z, 1.0)
        sess.run(variables.global_variables_initializer())
        sess.run(updater)
        sess.run(updater)
        sess.run(updater)
        sess.run(updater)
        sess.run(updater)
        sess.run(updater)
        sess.run(updater)
        sess.run(updater)
        sess.run(updater)
        sess.run(updater)
        r = sess.run(z)
        self.assertAllClose(r, 10.0)

  def testRandomNormalInitalizer(self):
    """Scalar random-normal init runs as a fused compute set on device."""
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg)
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("vs", use_resource=True):
          i = init_ops.random_normal_initializer(mean=2.0, stddev=0.01)
          z = variable_scope.get_variable("z1",
                                          shape=[],
                                          dtype=np.float32,
                                          initializer=i)
          sess.run(variables.global_variables_initializer())
      report = pva.openReport(report_helper.find_report())
      o = sess.run(z)
      self.assertAllClose(o, 2.0, 0.2, 0.2)
      ok = [
          'vs/z1/Initializer/random_normal/RandomStandardNormal/fusion/normal'
      ]
      self.assert_all_compute_sets_and_list(report, ok)

  def testRandomNormalNonScalarInitalizer(self):
    """Vector random-normal init also uses the fused normal compute set."""
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg)
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("vs", use_resource=True):
          i = init_ops.random_normal_initializer(mean=2.0, stddev=0.01)
          z = variable_scope.get_variable("z1",
                                          shape=[2],
                                          dtype=np.float32,
                                          initializer=i)
          sess.run(variables.global_variables_initializer())
      report = pva.openReport(report_helper.find_report())
      o = sess.run(z)
      self.assertAllClose(o, [2.0, 2.0], 0.2, 0.2)
      ok = [
          'vs/z1/Initializer/random_normal/RandomStandardNormal/fusion/normal'
      ]
      self.assert_all_compute_sets_and_list(report, ok)

  def testDefaultRandomNormalInitalizer(self):
    """Default random_normal_initializer produces a value near 0."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("vs", use_resource=True):
          i = init_ops.random_normal_initializer()
          z = variable_scope.get_variable("z1",
                                          shape=[],
                                          dtype=np.float32,
                                          initializer=i)
          sess.run(variables.global_variables_initializer())
      o = sess.run(z)
      self.assertAllClose(o, 0.0, 1.0, 3.0)

  def testTruncatedNormalScalarInitalizer(self):
    """Scalar truncated-normal init emits truncatedNormal + mul + add sets."""
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg)
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("", use_resource=True):
          i = init_ops.truncated_normal_initializer(mean=1.0, stddev=0.01)
          z = variable_scope.get_variable("z1",
                                          shape=[],
                                          dtype=np.float32,
                                          initializer=i)
          sess.run(variables.global_variables_initializer())
      o = sess.run(z)
      self.assertAllClose(o, 1.0, 0.2, 0.2)
      # Find of the names of compute sets
      report = pva.openReport(report_helper.find_report())
      # pylint: disable=line-too-long
      ok = [
          'z1/Initializer/truncated_normal/TruncatedNormal/truncated-normal*/truncatedNormal',
          'z1/Initializer/truncated_normal/mul',
          'z1/Initializer/truncated_normal/add'
      ]
      # pylint: enable=line-too-long
      self.assert_all_compute_sets_and_list(report, ok)

  def testTruncatedNormalInitalizer(self):
    """Matrix truncated-normal init fuses scale+shift into scaled-inplace."""
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg)
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("", use_resource=True):
          i = init_ops.truncated_normal_initializer(mean=1.0, stddev=0.01)
          z = variable_scope.get_variable("z1",
                                          shape=[2, 4],
                                          dtype=np.float32,
                                          initializer=i)
          sess.run(variables.global_variables_initializer())
      o = sess.run(z)
      self.assertAllClose(o, np.ones((2, 4)), 0.2, 0.2)
      # Find of the names of compute sets
      report = pva.openReport(report_helper.find_report())
      # pylint: disable=line-too-long
      ok = [
          'z1/Initializer/truncated_normal/TruncatedNormal/truncated-normal*/truncatedNormal',
          'z1/Initializer/truncated_normal/scaled-inplace',
      ]
      # pylint: enable=line-too-long
      self.assert_all_compute_sets_and_list(report, ok)

  def testDefaultTruncatedNormalScalarInitalizer(self):
    """Default scalar truncated-normal init uses only the truncatedNormal set."""
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg)
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("", use_resource=True):
          i = init_ops.truncated_normal_initializer()
          z = variable_scope.get_variable("z1",
                                          shape=[],
                                          dtype=np.float32,
                                          initializer=i)
          sess.run(variables.global_variables_initializer())
      o = sess.run(z)
      self.assertAllClose(o, 1.0, 2.0, 2.0)
      # Find of the names of compute sets
      report = pva.openReport(report_helper.find_report())
      # pylint: disable=line-too-long
      ok = [
          'z1/Initializer/truncated_normal/TruncatedNormal/truncated-normal*/truncatedNormal'
      ]
      # pylint: enable=line-too-long
      self.assert_all_compute_sets_and_list(report, ok)

  def testDefaultTruncatedNormalInitalizer(self):
    """Default matrix truncated-normal init uses only the truncatedNormal set."""
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg)
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("", use_resource=True):
          i = init_ops.truncated_normal_initializer()
          z = variable_scope.get_variable("z1",
                                          shape=[2, 4],
                                          dtype=np.float32,
                                          initializer=i)
          sess.run(variables.global_variables_initializer())
      o = sess.run(z)
      self.assertAllClose(o, np.ones((2, 4)), 2.0, 2.0)
      # Find of the names of compute sets
      report = pva.openReport(report_helper.find_report())
      # pylint: disable=line-too-long
      ok = [
          'z1/Initializer/truncated_normal/TruncatedNormal/truncated-normal*/truncatedNormal'
      ]
      # pylint: enable=line-too-long
      self.assert_all_compute_sets_and_list(report, ok)

  def testUniformRandomInitalizer(self):
    """Scalar uniform init runs as a fused uniform compute set on device."""
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg)
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("vs", use_resource=True):
          i = init_ops.random_uniform_initializer(minval=-2.0, maxval=2.0)
          z = variable_scope.get_variable("z1",
                                          shape=[],
                                          dtype=np.float32,
                                          initializer=i)
          sess.run(variables.global_variables_initializer())
      report = pva.openReport(report_helper.find_report())
      o = sess.run(z)
      self.assertAllClose(o, 0.0, 2.0, 2.0)
      ok = ['vs/z1/Initializer/random_uniform/RandomUniform/fusion/uniform']
      self.assert_all_compute_sets_and_list(report, ok)

  def testUniformRandomNonScalarInitalizer(self):
    """Vector uniform init also uses the fused uniform compute set."""
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg)
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("vs", use_resource=True):
          i = init_ops.random_uniform_initializer(minval=-2.0, maxval=2.0)
          z = variable_scope.get_variable("z1",
                                          shape=[2],
                                          dtype=np.float32,
                                          initializer=i)
          sess.run(variables.global_variables_initializer())
      report = pva.openReport(report_helper.find_report())
      o = sess.run(z)
      self.assertAllClose(o, [0.0, 0.0], 2.0, 2.0)
      ok = ['vs/z1/Initializer/random_uniform/RandomUniform/fusion/uniform']
      self.assert_all_compute_sets_and_list(report, ok)

  def testDefaultUniformRandomInitalizer(self):
    """Default uniform initializer draws from [0, 1)."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with ops.device("/device:IPU:0"):
      with self.session() as sess:
        with variable_scope.variable_scope("vs", use_resource=True):
          i = init_ops.random_uniform_initializer()
          z = variable_scope.get_variable("z1",
                                          shape=[],
                                          dtype=np.float32,
                                          initializer=i)
          sess.run(variables.global_variables_initializer())
        o = sess.run(z)
        self.assertAllClose(o, 0.5, 0.5, 0.5)

  def testVariablesRemainResident(self):
    """Weights stay on device across training steps; uploads only on fetch."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    tu.enable_ipu_events(cfg)
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("vs", use_resource=True):
          w = variable_scope.get_variable(
              "w",
              shape=[4, 2],
              dtype=np.float32,
              initializer=init_ops.constant_initializer(
                  np.array([[1, 2], [3, 4], [5, 6], [7, 8]],
                           dtype=np.float32)))
          b = variable_scope.get_variable(
              "b",
              shape=[2],
              dtype=np.float32,
              initializer=init_ops.constant_initializer(
                  np.array([2, 3], dtype=np.float32)))
        x = array_ops.placeholder(np.float32, shape=[1, 4])
        y = math_ops.matmul(x, w) + b
        loss = math_ops.reduce_sum(y)
        optimizer = gradient_descent.GradientDescentOptimizer(0.1)
        train = optimizer.minimize(loss)
      report_json = tu.ReportJSON(self, sess)
      sess.run(variables.global_variables_initializer())
      report_json.reset()
      sess.run([train, loss], {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
      sess.run([train, loss], {x: np.array([[1, 2, 3, 4]], dtype=np.float32)})
      sess.run([train, loss], {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
      sess.run([train, loss], {x: np.array([[1, 2, 3, 4]], dtype=np.float32)})
      sess.run([train, loss], {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
      # Expected transfer event names (download = host->device,
      # upload = device->host).
      w_dl = "1.0"
      w_ul = "out_1.0"
      b_dl = "2.0"
      b_ul = "out_2.0"
      report_json.parse_log()
      # The initialization is constant, so there are no events generated on the
      # IPU.
      report_json.assert_host_to_device_event_names(
          [w_dl, b_dl],
          "Weights/biases should be downloaded once, and the input no times "
          "because it is streamed")
      report_json.assert_device_to_host_event_names(
          [],
          "Weights/biases should not be uploaded, and the loss is streamed")
      # Explicitly fetch the weights
      vw, vb = sess.run([w, b])
      self.assertAllClose(
          np.array([[-1.3, -0.3], [1.7, 2.7], [2.9, 3.9], [3.5, 4.5]],
                   dtype=np.float32),
          vw,
          rtol=1e-4)
      self.assertAllClose(np.array([1.5, 2.5], dtype=np.float32),
                          vb,
                          rtol=1e-4)
      report_json.parse_log()
      report_json.assert_host_to_device_event_names(
          [], "Weights/biases/inputs should not be downloaded at all")
      report_json.assert_device_to_host_event_names(
          [w_ul, b_ul],
          "Weights/biases should be uploaded once (explicitly fetched)")

  def testResourceCountsAreCorrect(self):
    """Trainable and non-trainable variables transfer the expected number of times."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    tu.enable_ipu_events(cfg)
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("vs", use_resource=True):
          w1 = variable_scope.get_variable(
              "w1",
              shape=[4, 2],
              dtype=np.float32,
              initializer=init_ops.constant_initializer(
                  np.array([[1, 2], [3, 4], [5, 6], [7, 8]],
                           dtype=np.float32)))
          b1 = variable_scope.get_variable(
              "b1",
              shape=[2],
              dtype=np.float32,
              trainable=False,
              initializer=init_ops.constant_initializer(
                  np.array([2, 3], dtype=np.float32)))
          w2 = variable_scope.get_variable(
              "w2",
              shape=[2, 2],
              dtype=np.float32,
              initializer=init_ops.constant_initializer(
                  np.array([[1, 2], [3, 4]], dtype=np.float32)))
          b2 = variable_scope.get_variable(
              "b2",
              shape=[2],
              dtype=np.float32,
              trainable=False,
              initializer=init_ops.constant_initializer(
                  np.array([2, 3], dtype=np.float32)))
        x = array_ops.placeholder(np.float32, shape=[1, 4])
        y = math_ops.matmul(x, w1) + b1
        y = math_ops.matmul(y, w2) + b2
        loss = math_ops.reduce_sum(y)
        optimizer = gradient_descent.GradientDescentOptimizer(0.1)
        train = optimizer.minimize(loss)
      report_json = tu.ReportJSON(self, sess)
      sess.run(variables.global_variables_initializer())
      report_json.reset()
      sess.run([train, loss], {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
      sess.run([train, loss], {x: np.array([[1, 2, 3, 4]], dtype=np.float32)})
      sess.run([train, loss], {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
      sess.run([train, loss], {x: np.array([[1, 2, 3, 4]], dtype=np.float32)})
      sess.run([train, loss], {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
      w1_dl = "1.0"
      b1_dl = "2.0"
      w2_dl = "3.0"
      b2_dl = "4.0"
      # biases are not outputs of the graph
      w1_ul = "out_1.0"
      w2_ul = "out_2.0"
      report_json.parse_log()
      # The initialization is constant, so there are no events generated on the
      # IPU.
      report_json.assert_host_to_device_event_names(
          [w1_dl, b1_dl, w2_dl, b2_dl],
          "Weights/biases should be downloaded once, and the input no times "
          "because it is streamed")
      # Weights should not be uploaded, and the loss is streamed
      report_json.assert_device_to_host_event_names(
          [],
          "Weights/biases should not be uploaded, and the loss is streamed")
      # Explicitly fetch the first set of weights and biases
      vw, vb = sess.run([w1, b1])
      self.assertAllClose(np.array(
          [[100.00576782, 86.60944366], [57.62784195, 51.23856354],
           [93.45920563, 82.40240479], [155.36032104, 135.74447632]],
          dtype=np.float32),
                          vw,
                          rtol=1e-4)
      self.assertAllClose(np.array([2, 3], dtype=np.float32), vb, rtol=1e-4)
      report_json.parse_log()
      report_json.assert_host_to_device_event_names(
          [], "Weights/biases/inputs should not be downloaded at all")
      # Note all weights are fetched as a group
      report_json.assert_device_to_host_event_names(
          [w1_ul, w2_ul],
          "Weights/biases should be uploaded once (explicitly fetched)")

  def testTuplesOfTuplesAreStreamed(self):
    """Tuple outputs stream via infeed/outfeed with no transfer events."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    tu.enable_ipu_events(cfg)
    cfg.configure_ipu_system()
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("vs", use_resource=True):
          pa = array_ops.placeholder(np.int64, [2, 2], name="a")
          pb = array_ops.placeholder(np.int64, [2, 2], name="b")
          pc = array_ops.placeholder(np.int64, [2, 2], name="c")
          c = control_flow_ops.tuple((pa + pc, pb + pc))
      report_json = tu.ReportJSON(self, sess)
      report_json.reset()
      in0 = np.full((2, 2), 7)
      in1 = np.full((2, 2), 6)
      in2 = np.full((2, 2), 5)
      fd = {
          pa: in0,
          pb: in1,
          pc: in2,
      }
      out = sess.run(c, fd)
      self.assertEqual(len(out), 2)
      self.assertAllClose(out, (np.full((2, 2), 12), np.full((2, 2), 11)))
      report_json.parse_log()
      report_json.assert_host_to_device_event_names(
          [], "No io events implies the data was streamed")
      report_json.assert_device_to_host_event_names(
          [], "No io events implies the data was streamed")

  def testNonModifiedResourceIsNotOverwrittenInPlaceOp(self):
    """A read-only variable used by an inplace op keeps its value."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    tu.enable_ipu_events(cfg)
    cfg.configure_ipu_system()
    # This test verifies that if we have a resource varaible (w) which is marked
    # as not modified then a copy is inserted to make sure it is not overwritten
    # between executions if it is used by an inplace op
    w_val = [1, 2, 3, 4]
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        with variable_scope.variable_scope("vs", use_resource=True):
          w = variable_scope.get_variable(
              "w",
              shape=[4],
              dtype=np.float32,
              initializer=init_ops.constant_initializer(
                  np.array(w_val, dtype=np.float32)))
        px = array_ops.placeholder(np.float32, shape=[4])
        y = w + px
      report_json = tu.ReportJSON(self, sess)
      sess.run(variables.global_variables_initializer())
      report_json.reset()
      xs = [
          np.array([7, 3, 5, 9], dtype=np.float32),
          np.array([1, 8, 3, 4], dtype=np.float32),
          np.array([9, 2, 2, 6], dtype=np.float32)
      ]
      for x in xs:
        out = sess.run(y, {px: x})
        # If w had been clobbered by the inplace add, later iterations
        # would see a stale value here.
        self.assertAllClose(out, x + w_val)
      report_json.parse_log()
      w_dl = "1.0"
      report_json.assert_host_to_device_event_names(
          [w_dl], "w should be copied to device once and "
          "that should be the only io event")
      report_json.assert_device_to_host_event_names(
          [], "w should be copied to device once and "
          "that should be the only io event")

  def testGetConstantOutOfResourceVariable(self):
    """A variable read before any write can be constant-folded at compile time."""
    with ops.device("/device:IPU:0"):
      # Use floats to force device placement.
      a = variables.Variable(50.0)
      b = variables.Variable(2.0)

      @def_function.function(jit_compile=True)
      def f(x):
        return array_ops.reshape(
            x,
            [math_ops.cast(a, dtypes.int32),
             math_ops.cast(b, dtypes.int32)])

      # OK since the value is known at compile time.
      out = f(random_ops.random_normal([10, 10]))
      self.assertEqual(out.shape[0], 50)
      self.assertEqual(out.shape[1], 2)

  def testGetConstantOutOfResourceVariableAfterWrite(self):
    """Reading a variable after an in-function write cannot be constant-folded."""
    with ops.device("/device:IPU:0"):
      # Use floats to force device placement.
      a = variables.Variable(50.0)
      b = variables.Variable(2.0)

      @def_function.function(jit_compile=True)
      def f(x, val1, val2):
        a.assign(math_ops.cast(val1, dtypes.float32))
        b.assign(math_ops.cast(val2, dtypes.float32))
        return array_ops.reshape(
            x,
            [math_ops.cast(a, dtypes.int32),
             math_ops.cast(b, dtypes.int32)])

      val1 = constant_op.constant(2)
      val2 = constant_op.constant(50)
      # Returns an error, since the value known at compile time was overriden.
      with self.assertRaisesRegex(errors.InvalidArgumentError,
                                  'concrete values at compile time'):
        f(random_ops.random_normal([10, 10]), val1, val2)

  def testGetConstantOutOfResourceVariableBeforeWrite(self):
    """A read before the in-function write still constant-folds."""
    with ops.device("/device:IPU:0"):
      # Use floats to force device placement.
      a = variables.Variable(50.0)
      b = variables.Variable(2.0)

      @def_function.function(jit_compile=True)
      def f(x, val1, val2):
        out = array_ops.reshape(
            x,
            [math_ops.cast(a, dtypes.int32),
             math_ops.cast(b, dtypes.int32)])
        a.assign(math_ops.cast(val1, dtypes.float32))
        b.assign(math_ops.cast(val2, dtypes.float32))
        return out

      val1 = constant_op.constant(2)
      val2 = constant_op.constant(50)
      # OK since the write happens after the reshape.
      out = f(random_ops.random_normal([10, 10]), val1, val2)
      self.assertEqual(out.shape[0], 50)
      self.assertEqual(out.shape[1], 2)
if __name__ == "__main__":
    # Lower the XLA clustering threshold so even single ops are compiled,
    # preserving any flags the caller already set.
    existing_flags = os.environ.get('TF_XLA_FLAGS', '')
    os.environ['TF_XLA_FLAGS'] = '--tf_xla_min_cluster_size=1 ' + existing_flags
    googletest.main()
|
#coding: utf-8
from django.contrib import admin
from ppa_participativo.diretrizes.models import Eixo, Area, Acao
class EixoAdmin(admin.ModelAdmin):
    """Admin configuration for Eixo: list shows description and active flag."""
    list_display = ('descricao', 'ativo',)
    # Sidebar filter by registration date.
    list_filter = ['dt_cadastro', ]
class AreaAdmin(admin.ModelAdmin):
    """Admin configuration for Area: list shows description, parent Eixo, active flag."""
    list_display = ('descricao', 'fk_eixo', 'ativo',)
    # Sidebar filters by registration date and parent Eixo.
    list_filter = ['dt_cadastro', 'fk_eixo', ]
class AcaoAdmin(admin.ModelAdmin):
    """Admin configuration for Acao: list shows description, parent Area, active flag."""
    list_display = ('descricao', 'fk_area', 'ativo',)
    # Sidebar filters by registration date and parent Area.
    list_filter = ['dt_cadastro', 'fk_area', ]
# Register each model with its customised ModelAdmin in the default admin site.
admin.site.register(Eixo, EixoAdmin)
admin.site.register(Area, AreaAdmin)
admin.site.register(Acao, AcaoAdmin)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-08 23:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.9.6).

    Adds the Instructor and Transcript models, renames/lower-cases several
    Course fields, and rewires Course's instructor/prerequisite relations
    through many-to-many fields. Do not edit by hand unless you also adjust
    dependent migrations.
    """

    dependencies = [
        ('app', '0004_auto_20160605_2219'),
    ]

    operations = [
        migrations.CreateModel(
            name='Instructor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Transcript',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.RenameField(
            model_name='course',
            old_name='Credits',
            new_name='credits',
        ),
        # The capitalized fields below are dropped and re-added lower-cased
        # (a drop/add loses data, unlike RenameField above).
        migrations.RemoveField(
            model_name='course',
            name='Instructors',
        ),
        migrations.RemoveField(
            model_name='course',
            name='Level',
        ),
        migrations.RemoveField(
            model_name='course',
            name='Location',
        ),
        migrations.RemoveField(
            model_name='course',
            name='Number',
        ),
        migrations.RemoveField(
            model_name='course',
            name='Prereqs',
        ),
        migrations.RemoveField(
            model_name='course',
            name='Title',
        ),
        migrations.AddField(
            model_name='course',
            name='level',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AddField(
            model_name='course',
            name='location',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AddField(
            model_name='course',
            name='number',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AddField(
            model_name='course',
            name='prerequisites',
            # '+' suffix suppresses the reverse accessor on the self-relation.
            field=models.ManyToManyField(related_name='_course_prerequisites_+', to='app.Course'),
        ),
        migrations.AddField(
            model_name='course',
            name='title',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AlterField(
            model_name='course',
            name='ID',
            field=models.CharField(blank=True, max_length=6),
        ),
        migrations.AddField(
            model_name='transcript',
            name='courses',
            field=models.ManyToManyField(to='app.Course'),
        ),
        migrations.AddField(
            model_name='course',
            name='instructors',
            field=models.ManyToManyField(to='app.Instructor'),
        ),
    ]
|
from datetime import date, timedelta
from django.db import models
from django.db.models import Avg, Sum
from django.db.models.signals import post_save
from django.dispatch import receiver
class Currency(models.Model):
    """A currency identified by its display name."""
    # Human-readable currency name (e.g. "USD").
    name = models.CharField(max_length=255)

    def __str__(self):
        # Shown in the admin and anywhere the model is rendered as text.
        return self.name
class Exchange(models.Model):
    """Exchange rate between two currencies on a specific date."""
    date = models.DateField()
    # NOTE(review): ForeignKey without on_delete only works on Django < 2.0 --
    # confirm the project's target Django version.
    currency_from = models.ForeignKey(Currency)
    currency_to = models.ForeignKey(Currency, related_name='currency_to')
    # Rate stored with 8 decimal places of precision.
    rate = models.DecimalField(decimal_places=8, max_digits=14)

    class Meta:
        # At most one quote per (date, currency pair).
        unique_together = ('date', 'currency_from', 'currency_to')

    def __str__(self):
        return str(self.date)

    @staticmethod
    def average_week(obj_date, currency_from, currency_to):
        # Average rate over the 7 days up to and including obj_date.
        # Returns the aggregate dict {'rate__avg': Decimal or None}, not a
        # bare number -- callers must unpack it.
        d = obj_date - timedelta(days=7)
        average = Exchange.objects.filter(currency_from=currency_from,
                                          currency_to=currency_to,
                                          date__range=(d, obj_date)
                                          ).aggregate(Avg('rate'))
        return average

    @staticmethod
    def save_swap(obj):
        # Persist the reverse quote (to -> from) for obj.
        try:
            if obj.currency_from == obj.currency_to:
                # Same currency on both sides: reuse the rate unchanged
                # (inverting would be redundant; for an identity pair the
                # create will typically hit the unique constraint anyway).
                Exchange.objects.create(date=obj.date,
                                        currency_from=obj.currency_to,
                                        currency_to=obj.currency_from,
                                        rate=obj.rate)
            else:
                # Reverse direction uses the reciprocal rate.
                Exchange.objects.create(date=obj.date,
                                        currency_from=obj.currency_to,
                                        currency_to=obj.currency_from,
                                        rate=1 / obj.rate)
        except Exception:
            # NOTE(review): all failures (duplicate key, rate == 0 division)
            # are silently swallowed -- confirm this best-effort behaviour is
            # intentional; at minimum it should probably be logged.
            pass
|
import datetime
import json
from collections import namedtuple
from copy import deepcopy
from os import listdir
import Augmentor
import numpy as np
from PIL import Image
from src.data.constants import LayerType
from src.data.setup import Constants
'''
I do not own this code;
taken from: https://github.com/huyouare/CS231n/blob/master/assignment2/cs231n/im2col.py
'''
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
    """Compute the fancy-indexing arrays (k, i, j) used by im2col/col2im.

    For an input of shape (N, C, H, W) and a receptive field of
    field_height x field_width, returns:
      k -- channel indices, shape (C * field_height * field_width, 1)
      i -- row indices,     shape (C * field_height * field_width, out_h * out_w)
      j -- column indices,  same shape as i
    so that x_padded[:, k, i, j] gathers every patch as a column.

    Raises AssertionError if (H, W) with the given padding/stride do not
    tile evenly.
    """
    # First figure out what the size of the output should be
    N, C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0
    # BUG FIX: the original checked field_height against W as well, which
    # wrongly rejected valid non-square receptive fields.
    assert (W + 2 * padding - field_width) % stride == 0
    out_height = (H + 2 * padding - field_height) // stride + 1
    out_width = (W + 2 * padding - field_width) // stride + 1
    # Row index of each element within a patch, repeated per channel...
    i0 = np.repeat(np.arange(field_height), field_width)
    i0 = np.tile(i0, C)
    # ...plus the row offset of each output position.
    i1 = stride * np.repeat(np.arange(out_height), out_width)
    j0 = np.tile(np.arange(field_width), field_height * C)
    j1 = stride * np.tile(np.arange(out_width), out_height)
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)
    # Channel index of each patch element.
    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
    return (k, i, j)
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
    """ An implementation of im2col based on some fancy indexing """
    # Promote a 3-D input (N, H, W) to 4-D (N, 1, H, W).
    if len(x.shape) == 3:
        x = np.expand_dims(x, axis=1)
    # Zero-pad only the two spatial dimensions.
    pad_spec = ((0, 0), (0, 0), (padding, padding), (padding, padding))
    x_padded = np.pad(x, pad_spec, mode='constant')
    k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding,
                                 stride)
    # Gather every receptive-field patch, then flatten patches into columns.
    patches = x_padded[:, k, i, j]
    num_rows = field_height * field_width * x.shape[1]
    return patches.transpose(1, 2, 0).reshape(num_rows, -1)
def col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1,
                   stride=1):
    """ An implementation of col2im based on fancy indexing and np.add.at """
    N, C, H, W = x_shape
    padded_shape = (N, C, H + 2 * padding, W + 2 * padding)
    x_padded = np.zeros(padded_shape, dtype=cols.dtype)
    k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding,
                                 stride)
    # Reassemble columns into per-image patch stacks.
    stacked = cols.reshape(C * field_height * field_width, -1, N)
    stacked = stacked.transpose(2, 0, 1)
    # Scatter-add overlapping patches back into the padded image.
    np.add.at(x_padded, (slice(None), k, i, j), stacked)
    if padding == 0:
        return x_padded
    # Strip the zero padding before returning.
    return x_padded[:, :, padding:-padding, padding:-padding]
def scaleBetweenValues(array, lowerBound=0, upperBound=1, dtype=int):
    """Linearly rescale `array` into [lowerBound, upperBound] and cast to `dtype`.

    The input array is NOT modified (the original took a numpy view with
    `array[:]` and mutated the caller's data in place). Also fixes the scale
    formula: the original added `lowerBound` inside the multiplication factor
    instead of after scaling, producing wrong results for lowerBound != 0.

    A constant input maps every element to `lowerBound`.
    """
    lo = np.min(array)
    hi = np.max(array)
    # float copy: safe in-place arithmetic regardless of the input dtype.
    scaled = np.asarray(array, dtype=float).copy()
    scaled -= lo
    if hi != lo:  # guard against division by zero for constant arrays
        scaled *= (upperBound - lowerBound) / (hi - lo)
    scaled += lowerBound
    return scaled.astype(dtype)
'''
save feature map of conv as pngs
'''
def exportPNGs(featured, opType):
    """Save each feature map in `featured` as a 100x100 grayscale PNG
    under Constants.FEATURES_ROOT, tagged with timestamp, `opType` and
    the map's object id."""
    for feature_map in featured:
        # Work on a copy so the scaling never touches the original map.
        scaled = scaleBetweenValues(deepcopy(feature_map), 0, 255)
        image = Image.fromarray(scaled).convert('L').resize((100, 100))
        stamp = datetime.datetime.now()
        filename = (Constants.FEATURES_ROOT + '/'
                    + str(stamp.strftime("%Y-%m-%d-%Hhh%Mmm")) + '-'
                    + opType + "-" + str(id(feature_map)) + '.png')
        image.save(filename)
'''
export model's training history as html
'''
def exportHistory(export, modelJSON):
    """Write a training-history report as a timestamped HTML file.

    `export` is an iterable of (loss, accuracy) pairs, one per epoch;
    `modelJSON` is the serialized model configuration echoed at the top
    of the report. Files land under Constants.HISTORY_ROOT.
    """
    now = datetime.datetime.now()
    path = Constants.HISTORY_ROOT + '/' + str(now.strftime("%Y-%m-%d-%H-%M")) + '.html'
    # Context manager ensures the handle is flushed and closed -- the
    # original opened the file and never closed it (resource leak).
    with open(path, 'w+') as file:
        file.write('<h2> configuration </h2>')
        file.write(modelJSON)
        file.write('<br>')
        file.write('<table style="border:1px solid black;" cellpadding="10">')
        file.write('<tr><th>EPOCH</th><th>LOSS</th><th>ACCURACY</th></tr>')
        # enumerate replaces the original's manual epoch counter.
        for epoch, step in enumerate(export, start=1):
            file.write('<tr>')
            file.write('<td>' + str(epoch) + '</td>')
            file.write('<td>' + str(step[0]) + '</td>')
            file.write('<td>' + str(int(step[1] * 100)) + "%" + '</td>')
            file.write('</tr>')
        file.write('</table>')
def exportModel(layers, prediction):
    """Serialize the trained model to a timestamped JSON file.

    `layers` is an iterable of (layer, type) pairs where `type` is a
    LayerType member; `prediction` is a (data, result, probabilities)
    triple of arrays used as the embedded sample. Files land under
    Constants.MODEL_ROOT.
    """
    now = datetime.datetime.now()
    path = Constants.MODEL_ROOT + '/' + 'model-' + str(now.strftime("%Y-%m-%d-%H-%M")) + '.json'
    layersDef = []
    for layer, type in layers:
        convParams = None
        weights = layer.getFormattedWeights().tolist()
        biases = None
        if type == LayerType.CONV:
            convParams = layer.getConvParams()
            biases = layer.getBiases().tolist()
        if type == LayerType.HIDDEN:
            # two more dimension is needed for hidden layers
            biases = layer.getBiases().tolist()
            weights = [[weights]]
        layersDef.append(json.dumps(
            {'type': str(type.name), 'activation': layer.getActivation().getType().name,
             'weights': weights, "biases": biases, "convParams": convParams}))
    layersString = ','.join(layersDef)
    sample = {"data": prediction[0].tolist(), "result": prediction[1].tolist(), "probabilities": prediction[2].tolist()}
    # Context manager fixes the original's leaked file handle; the JSON is
    # assembled by string concatenation exactly as before.
    with open(path, 'w+') as file:
        file.write('{"model": {'
                   + '"layers": [' + layersString + "]" +
                   ', "sample": ' + str(sample).replace("'", '"') +
                   '} }')
def augmentateDataset(samples):
    """Run an Augmentor pipeline over every class folder under DATASET_ROOT,
    emitting `samples` augmented images per folder.
    """
    for folder in listdir(Constants.DATASET_ROOT):
        # NOTE(review): plain concatenation assumes DATASET_ROOT ends with a
        # path separator — confirm, or switch to os.path.join.
        p = Augmentor.Pipeline(Constants.DATASET_ROOT + folder)
        p.rotate(probability=0.7, max_left_rotation=10, max_right_rotation=10)
        p.flip_left_right(probability=0.5)
        # Positional args: probability, then the operation-specific range values.
        p.random_brightness(0.6, 0.3, 0.8)
        p.scale(0.4, 1.6)
        p.sample(samples)
def parseJSON(jsonValue):
    """Deserialize a JSON string into nested namedtuple instances.

    Every JSON object becomes an ad-hoc namedtuple whose fields mirror the
    object's keys, so values are reachable via attribute access.
    """
    def _to_record(mapping):
        # object_hook runs bottom-up, so nested dicts are already records.
        return namedtuple('X', mapping.keys())(*mapping.values())
    return json.loads(jsonValue, object_hook=_to_record)
|
"""Write a function that encrypts a string with a variable rotary cipher.
The function should take in a number and string and shift the string's
characters by that number:
>>> rot_encode(1, 'abcxyz')
'bcdyza'
It should be able to shift characters by any number:
>>> rot_encode(3, 'abcxyz')
'defabc'
It should preserve capitalization, whitespace, and any special characters:
>>> rot_encode(1, 'Wow! This is 100% amazing.')
'Xpx! Uijt jt 100% bnbajoh.'
"""
def rot_encode(shift, txt):
    """Encode `txt` by rotating its ASCII letters `shift` places to the right.

    Capitalization, whitespace, digits, and punctuation are preserved.
    Non-ASCII alphabetic characters pass through unchanged.

    Fixes over the original: shifts of any size (including >= 26) are handled
    via modular arithmetic, and non-ASCII letters no longer raise ValueError.
    """
    alpha = "abcdefghijklmnopqrstuvwxyz"
    upper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    new_string = []
    for char in txt:
        if char in upper:
            new_string.append(upper[(upper.index(char) + shift) % 26])
        elif char in alpha:
            new_string.append(alpha[(alpha.index(char) + shift) % 26])
        else:
            new_string.append(char)
    return "".join(new_string)
if __name__ == '__main__':
    import doctest

    outcome = doctest.testmod()
    # Celebrate only when every doctest in the module docstring passed.
    if not outcome.failed:
        print('\n✨ ALL TESTS PASSED!\n')
|
import math  # NOTE(review): unused in the visible code; kept intact
# How many years does a fixed 100-unit deposit need, at 1% floor-rounded
# yearly interest, to reach the target amount read from stdin?
x = int(input())
targetMoney = x
currentMoney = 100
yearCounter = 0
while currentMoney < targetMoney:
    yearCounter += 1
    # Integer interest: 1% of the balance, rounded down.
    currentMoney += currentMoney // 100
print(yearCounter)
|
import os
import random
from copy import deepcopy
import logging
import time
import json
import numpy as np
import torch
from sentencepiece import SentencePieceProcessor as sp
from config import Config
class Reader:
    """Loads summarization data and builds padded tensor batches for KoGPT2."""

    def __init__(self, config):
        # SentencePiece tokenizer shared by all batches.
        self.tokenizer = sp(config.kogpt2_tokenizer_path)
        self.train_data = []
        self.dev_data = []
        self.data_path = config.data_path
        self.batch_size = config.batch_size
        self.max_length = config.max_length
        self.vocab_size = config.vocab_size
        self.bos_idx = config.bos_idx
        self.eos_idx = config.eos_idx
        self.pad_idx = config.pad_idx

    def load_data(self):
        """Read the train/dev JSON files under `data_path` into memory."""
        # 'with' closes the handles; the original left them to the GC.
        with open(os.path.join(self.data_path, "train_data.json"), "r") as f:
            self.train_data = json.load(f)
        with open(os.path.join(self.data_path, "dev_data.json"), "r") as f:
            self.dev_data = json.load(f)

    def make_batch(self, mode="train"):
        """Yield document batches of up to `batch_size`, in shuffled order.

        Args:
            mode: "train" selects train_data; anything else selects dev_data.
        """
        data = self.train_data if mode == "train" else self.dev_data
        all_batches = []
        batch = []
        for doc_id, doc in data.items():
            batch.append(doc)
            if len(batch) == self.batch_size:
                all_batches.append(batch)
                batch = []
        if batch:  # keep the final, possibly smaller batch
            all_batches.append(batch)
        # Shuffle batch order only; documents stay grouped within a batch.
        random.shuffle(all_batches)
        for batch in all_batches:
            yield batch

    def make_input(self, batch, train=True):
        """Tensorize a batch into padded (inputs, labels, doc_lengths).

        In train mode, inputs are document+summary and labels are the
        next-token targets; in eval mode, inputs are the document only and
        labels are summary+EOS.
        """
        batch_size = len(batch)
        inputs = torch.ones(batch_size, self.max_length, dtype=torch.int64).cuda() * self.pad_idx
        labels = torch.ones(batch_size, self.max_length, dtype=torch.int64).cuda() * self.pad_idx
        doc_lengths = []
        max_length = 0
        max_label_length = 0
        for batch_idx in range(batch_size):
            document = self.tokenizer.EncodeAsIds(batch[batch_idx]["document"] + " ; Summary: ")
            summary = self.tokenizer.EncodeAsIds(batch[batch_idx]["summary"])
            if train:
                # Keep the document's tail so document+summary+EOS fits max_length.
                document = document[-(self.max_length - len(summary) - 1):]
                context = document + summary
            else:
                document = document[-self.max_length:]
                context = document
            doc_lengths.append(len(document))
            length = len(context)
            inputs[batch_idx, :length] = torch.tensor(context, dtype=torch.int64)
            if train:
                # Shift left by one and append EOS for language-model targets.
                labels[batch_idx, :length] = torch.tensor(context[1:] + [self.eos_idx], dtype=torch.int64)
            else:
                label_length = len(summary) + 1
                labels[batch_idx, :label_length] = torch.tensor(summary + [self.eos_idx], dtype=torch.int64)
                max_label_length = max(max_label_length, len(summary) + 1)
            max_length = max(max_length, length)
        # Trim padding columns beyond the longest sequence in the batch.
        inputs = inputs[:, :max_length]
        labels = labels[:, :max_length] if train else labels[:, :max_label_length]
        return inputs, labels, doc_lengths
if __name__ == "__main__":
config = Config()
parser = config.parser
config = parser.parse_args()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
logger.addHandler(handler)
reader = Reader(config)
logger.info("Load data...")
start = time.time()
reader.load_data()
end = time.time()
logger.info("{} secs".format(end-start))
logger.info("Make batch...")
start = time.time()
iterator = reader.make_batch("dev")
end = time.time()
logger.info("{} secs".format(end-start))
for batch in iterator:
inputs, labels, doc_lengths = reader.make_input(batch)
|
# Adapted from http://stackoverflow.com/questions/110803/dirty-fields-in-django
from django.db.models.signals import post_save
class DirtyFieldsMixin(object):
    """Track which local model fields changed since the last save.

    On construction (and after every post_save) the instance snapshots its
    field values; get_dirty_fields() reports the saved values that no longer
    match the live ones.
    """

    def __init__(self, *args, **kwargs):
        super(DirtyFieldsMixin, self).__init__(*args, **kwargs)
        # Re-snapshot after each save so the baseline tracks the DB state.
        post_save.connect(
            reset_state, sender=self.__class__,
            dispatch_uid='%s-DirtyFieldsMixin-sweeper' % self.__class__.__name__)
        reset_state(sender=self.__class__, instance=self)

    def _as_dict(self):
        # Only concrete local fields; relations are excluded.
        return {field.name: getattr(self, field.name)
                for field in self._meta.local_fields if not field.rel}

    def get_dirty_fields(self):
        """Map of field name -> previously-saved value, for changed fields."""
        current = self._as_dict()
        return {name: saved
                for name, saved in self._original_state.items()
                if saved != current[name]}

    def is_dirty(self):
        # in order to be dirty we need to have been saved at least once, so we
        # check for a primary key and we need our dirty fields to not be empty
        if not self.pk:
            return True
        return self.get_dirty_fields() != {}
def reset_state(sender, instance, **kwargs):
    # Snapshot the instance's current field values as its clean baseline;
    # wired to post_save so a saved instance is never considered dirty.
    instance._original_state = instance._as_dict()
|
# Generated by Django 2.0.8 on 2018-08-21 20:50
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop Person.current_through."""

    dependencies = [
        ('api', '0091_user_mc_pk'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='person',
            name='current_through',
        ),
    ]
|
#import necessary libraries
import os
import keras
import numpy as np
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
from A4.mixup_generator import MixupImageDataGenerator
# setup current work path (relative to the process's working directory)
baseDir = os.path.abspath('.')
modelPath = os.path.join(baseDir,'A4','efficientNetMixUp_best.h5')
img = os.path.join(baseDir ,'Datasets','train')
# load pre-trained model
final_model = keras.models.load_model(modelPath)
# configure image generator: rescale to [0,1] plus on-the-fly augmentations
img_gen = ImageDataGenerator(rescale = 1.0/255.0, horizontal_flip = True, vertical_flip = True,
                             fill_mode = 'nearest', rotation_range = 10, width_shift_range = 0.2,
                             height_shift_range= 0.2, shear_range= 0.2, brightness_range= (0.5,1.2),
                             zoom_range = 0.2)
def preProcessing():
    """Load the train/test image-label CSVs and attach integer class labels.

    Returns:
        (train_img, test_img) DataFrames, each with a 'class_num' column
        mapped from the string 'label' column.
    """
    # map string type labels to int type
    label_to_class = {'cbb':0,'cbsd':1,'cgm':2,'cmd':3,'healthy':4}
    frames = []
    for csv_name in ('train_img.csv', 'test_img.csv'):
        frame = pd.read_csv(os.path.join(baseDir, 'Datasets', csv_name))
        # Drop the stray index column written by the CSV export.
        frame.drop(columns=['Unnamed: 0'], inplace=True)
        frame['class_num'] = frame['label'].map(label_to_class)
        frames.append(frame)
    return frames[0], frames[1]
def train(train_img):
    """Evaluate the loaded model over the training set via a mixup generator.

    Returns the accuracy reported by evaluate_generator.
    """
    # Renamed from 'train' to avoid shadowing this function's own name.
    mixup_flow = MixupImageDataGenerator(train_img, generator=img_gen, directory=img, x_col = 'filename', y_col = 'label',
                                         batch_size=12, target_size =(220, 220))
    # evaluate_generator will return loss and acc of the prediction
    loss, acc = final_model.evaluate_generator(mixup_flow, steps=mixup_flow.n // 12)
    return acc
def test(test_img):
    """Evaluate the loaded model on the held-out test set; returns accuracy."""
    eval_flow = img_gen.flow_from_dataframe(test_img, directory = img, x_col = 'filename', y_col = 'label',
                                            target_size =(220, 220), class_mode = 'categorical', shuffle = False,
                                            batch_size = 12, color_mode = 'rgb')
    loss, acc = final_model.evaluate_generator(eval_flow, steps=eval_flow.n // 12)
    return acc
|
from pymongo import MongoClient
import RPi.GPIO as GPIO
from hw_pins import hw_pins
import threading
import socket
import pika
import time
import pickle
from rmq_params import rmq_params, rmq_routing_keys
import pytz
from datetime import datetime
current_id = None
def get_current_time():
    """Return the current US/Eastern wall time formatted as "MM-DD-HH-MM"."""
    eastern = pytz.timezone('US/Eastern')
    return datetime.now(eastern).strftime("%m-%d-%H-%M")
def is_valid_time(start_time, end_time, given):
    """
    Decide whether `given` falls inside the reservation window.

    All three arguments are "MM-DD-HH-MM" stamps (only the HH-MM tail is
    compared; dates are ignored, so windows must not cross midnight).

    Fix: the original parsed start_minute but never used it, so an arrival
    within the start hour but before the start minute was wrongly accepted.
    Minutes-of-day are now compared on both ends.

    :param start_time: window start stamp
    :param end_time: window end stamp
    :param given: stamp to test (usually the current time)
    :return: "no", "almost" (fewer than 15 minutes left), or "good"
    """
    def _minutes_of_day(stamp):
        # Convert the HH-MM tail of an "MM-DD-HH-MM" stamp to minutes.
        parts = stamp.split("-")
        return int(parts[2]) * 60 + int(parts[3])

    start = _minutes_of_day(start_time)
    end = _minutes_of_day(end_time)
    cur = _minutes_of_day(given)
    # now compare the values to see if it is time for them
    if cur >= start:
        # compare to see if it is within 15 mins of ending
        time_left = end - cur
        if time_left <= 0:
            return "no"
        print(time_left)
        if time_left < 15:
            print("Warn the user time is almost up.")
            return "almost"
        else:
            print("say the user is good now")
            return "good"
    else:
        print("say the user is not good.")
        return "no"
# setup the mongodb here (local instance, default port)
client = MongoClient('mongodb://localhost:27017/')
# Get the database to use for mongodb
db = client.hokie_id
# Collection of {id, start_time, end_time} reservation documents.
collection = db.student_ids
def setup_rmq():
    """Connect to RabbitMQ, declare the exchange and queues, return a channel.

    Each worker thread calls this to get its own channel; the underlying
    BlockingConnection stays open for the life of the consumer.
    Fix: dropped the unused order_base_queue_name/order_queue_num locals.
    """
    username = rmq_params["username"]
    pword = rmq_params["password"]
    ip_to_run = '0.0.0.0'
    virtual_host = rmq_params["vhost"]
    credentials = pika.PlainCredentials(username, pword)
    parameters = pika.ConnectionParameters(host=ip_to_run, virtual_host=virtual_host, port=5672, credentials=credentials,
                                           socket_timeout=1000)
    connection = pika.BlockingConnection(parameters)
    print("[Checkpoint] Connected to vhost %s on RMQ server at %s as user %s" % (virtual_host, ip_to_run, username))
    # Need to make the channel for the queues to talk through
    channel = connection.channel()
    print("[Checkpoint] Setting up exchanges and queues...")
    # The server's job is to create the queues that are needed
    channel.exchange_declare(rmq_params["exchange"], exchange_type='direct')
    # make all the queues for the service (declare/bind are idempotent)
    channel.queue_declare(rmq_params["valid_queue"],auto_delete=False)
    channel.queue_declare(rmq_params["id_queue"], auto_delete=False)
    channel.queue_declare(rmq_params["ffa_queue"], auto_delete=False)
    channel.queue_bind(exchange=rmq_params["exchange"], queue=rmq_params["ffa_queue"], routing_key=rmq_routing_keys["ffa_queue"])
    channel.queue_bind(exchange=rmq_params["exchange"], queue=rmq_params["id_queue"], routing_key=rmq_routing_keys["id_queue"])
    channel.queue_bind(exchange=rmq_params["exchange"], queue=rmq_params["valid_queue"], routing_key=rmq_routing_keys["valid_queue"])
    print("[Checkpoint] Connected to vhost %s on RMQ server at %s as user %s" % (virtual_host, ip_to_run, username))
    return channel
def listen_for_times_to_enter():
    """Accept TCP registrations of reservation windows and store them in Mongo.

    Each message is a comma-separated "id,start_time,end_time" string; any
    prior windows for the same id are deleted before the new one is inserted.
    Runs forever.
    """
    print("RUnning socket boi")
    bind_host = '0.0.0.0'
    bind_port = 6969
    recv_size = 1024  # Normally 1024, but we want fast response
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((bind_host, bind_port))
    server.listen(1)
    while True:
        conn, addr = server.accept()
        print('Connection address:', addr)
        while True:
            raw = conn.recv(recv_size)
            if not raw:
                break
            text = raw.decode('utf-8')
            print("received data:", text)
            fields = text.split(",")
            record = {"id": fields[0], "start_time": fields[1], "end_time": fields[2]}
            print("I'm inserting this: " + str(record))
            # Remove all the others from the database here
            collection.delete_many({'id': fields[0]})
            # now insert my thing above
            collection.insert_one(record)
            print("Just inserted it")
        conn.close()
count = 0
def motion_handler():
    """Consume motion events from the FFA queue and forward roughly 1 in 11
    of them to the TCP server (module-level `count` implements the throttle).
    """
    # Need to make the channel for the queues to talk through
    channel = setup_rmq()
    # Now stop and listen on the id queue
    def motion_callback(ch, method, properties, body):
        # slow down the socket conneciton
        global count
        count += 1
        if count <= 10:
            return
        count = 0
        value = body.decode("utf-8")
        print("Received %s" % value)
        # Now send it to the server here
        # NOTE(review): '0.0.0.0' is a bind address; as a connect target it
        # usually resolves to localhost — confirm the intended server host.
        TCP_IP = '0.0.0.0'
        TCP_PORT = 9696
        BUFFER_SIZE = 1024
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        MESSAGE = value
        print("sending: " + MESSAGE)
        s.connect((TCP_IP, TCP_PORT))
        s.send(MESSAGE.encode())
        s.close()
        print("Done")
    # Sets up the callback that is used
    queue_name = rmq_params["ffa_queue"]
    # NOTE(review): positional (callback, queue) ordering matches pika < 1.0;
    # pika >= 1.0 expects (queue, on_message_callback) — confirm pinned version.
    channel.basic_consume(motion_callback, queue_name, no_ack=True)
    print("[Checkpoint] Consuming from RMQ queue: %s" % queue_name)
    # Start consuming the messages here
    channel.start_consuming()
def time_watcher():
    """Poll Mongo once per second while a slot is held and publish whether the
    holder's reservation is still valid; frees the slot once it expires.

    Currently not started (the thread launch at the bottom is commented out).
    """
    global current_id
    channel = setup_rmq()
    while 1:
        # sleep for a second
        time.sleep(1)
        print("Checking")
        # if nobody has claimed the spot yet, don't need to do anything
        if current_id == None:
            continue
        result = collection.find_one({'id': current_id})
        if result:
            print("Id is not updated")
            cur = get_current_time()
            start = result['start_time']
            end = result['end_time']
            response = is_valid_time(start, end, cur)
        else:
            # make the current time slot available
            response = "no"
        # make the response a no
        if response == "no":
            current_id = None
        # Signal we have sent the order to the server
        # NOTE(review): publishes every second while a slot is held, even when
        # the verdict is unchanged — confirm consumers tolerate the repeats.
        channel.basic_publish(exchange=rmq_params["exchange"], routing_key=rmq_routing_keys["valid_queue"], body=response)
def reserver_thread():
    """Consume scanned ids from the id queue, validate them against Mongo,
    and publish "good"/"almost"/"no" on the valid queue.

    A recognized id claims the slot by setting the module-level `current_id`.
    """
    # Need to make the channel for the queues to talk through
    channel = setup_rmq()
    # Now stop and listen on the id queue
    def callback(ch, method, properties, body):
        global current_id
        value = body.decode("utf-8")
        print("Received %s" % value)
        # Checks the mongodatabase
        # This should send it through a rabbitMQ message queue
        result = collection.find_one({'id': value})
        # makes sure the right person is accessing, and that the current person still has it reserved
        #if result and not current_id:
        if result:
            current_id = value
            cur = get_current_time()
            start = result['start_time']
            end = result['end_time']
            response = is_valid_time(start, end, cur)
        else:
            #current_id = None
            response = "no"
        # Signal we have sent the order to the server
        ch.basic_publish(exchange=rmq_params["exchange"], routing_key=rmq_routing_keys["valid_queue"], body=response)
    # Sets up the callback that is used
    queue_name = rmq_params["id_queue"]
    # NOTE(review): positional (callback, queue) ordering matches pika < 1.0 —
    # confirm the pinned pika version.
    channel.basic_consume(callback, queue_name, no_ack=True)
    print("[Checkpoint] Consuming from RMQ queue: %s" % queue_name)
    # Start consuming the messages here
    channel.start_consuming()
# now start two threads and pass the channel onto them so they can start listening on them
# (each worker actually opens its own RMQ channel via setup_rmq)
rt = threading.Thread(name='reserve', target=reserver_thread)
rt.start()
mt = threading.Thread(name='motion', target=motion_handler)
mt.start()
sockme = threading.Thread(name='mongo_enterer', target= listen_for_times_to_enter)
sockme.start()
# The expiry watcher is currently disabled.
#timer = threading.Thread(name='timer', target=time_watcher)
#timer.start()
|
"""Rewrites raw M-Lab FQDNs to apply post-processing or annotations."""
import logging
from mlabns.util import message
def rewrite(fqdn, address_family, tool_id):
    """Apply all raw-FQDN rewrites: AF annotation plus NDT-SSL/ndt7 fixups.

    Args:
        fqdn: Tool FQDN without any address-family annotation.
        address_family: Address family to encode in the FQDN, or None for an
            address-family-agnostic name.
        tool_id: Name of the tool associated with the FQDN (e.g. 'ndt_ssl').

    Returns:
        The FQDN with every applicable rewrite applied.
    """
    result = _apply_af_awareness(fqdn, address_family)
    # ndt_ssl/ndt7 need dash-separated subdomains instead of dotted ones.
    if tool_id in ('ndt_ssl', 'ndt7'):
        result = _apply_ndt_ssl_workaround(result)
    return result
def _apply_af_awareness(fqdn, address_family):
    """Annotate the machine label of an FQDN with a v4/v6 suffix.

    Example:
      fqdn: 'ndt.iupui.mlab3.ath01.measurement-lab.org'
      ipv4 only: 'ndt.iupui.mlab3v4.ath01.measurement-lab.org'
      ipv6 only: 'ndt.iupui.mlab3v6.ath01.measurement-lab.org'

    Args:
        fqdn: A tool FQDN with no address family specific annotation.
        address_family: Address family for the FQDN, or None for agnostic.

    Returns:
        The address-family-specific FQDN, or the input FQDN unchanged when no
        (or an unrecognized) address family is given.
    """
    if not address_family:
        suffix = ''
    elif address_family == message.ADDRESS_FAMILY_IPv4:
        suffix = 'v4'
    elif address_family == message.ADDRESS_FAMILY_IPv6:
        suffix = 'v6'
    else:
        logging.error('Unrecognized address family: %s', address_family)
        return fqdn
    labels = _split_fqdn(fqdn)
    # The third label is the machine name (e.g. 'mlab3').
    labels[2] = labels[2] + suffix
    return '.'.join(labels)
def _apply_ndt_ssl_workaround(fqdn):
"""Rewrites ndt_ssl/ndt7 FQDNs to use dash separators for subdomains.
The NDT-SSL test uses dashes instead of dots as separators in the subdomain,
but Nagios currently reports the FQDNs as using dots.
For example, instead of:
ndt.iupui.mlab1.lga06.measurement-lab.org
NDT-SSL uses:
ndt-iupui-mlab1-lga06.measurement-lab.org
We rewrite the dotted FQDNs to use dashes so that NDT-SSL/ndt7 work
properly. This is intended to be a temporary workaround until we can
find a solution that does not require NDT-SSL/ndt7 to be special cases
from mlab-ns's perspective.
See https://github.com/m-lab/mlab-ns/issues/48 for more information.
Args:
fqdn: An NDT-SSL or ndt7 FQDN in dotted notation.
Returns:
FQDN with rewritten dashes if a rewrite was necessary, the original FQDN
otherwise.
"""
fqdn_parts = _split_fqdn(fqdn)
# Create subdomain like ndt-iupui-mlab1-lga06
subdomain = '-'.join(fqdn_parts[:-2])
return '.'.join((subdomain, fqdn_parts[-2], fqdn_parts[-1]))
def _split_fqdn(fqdn):
return fqdn.split('.')
|
from numpy.core.numeric import NaN
from src.Point import Point
from .RansacLineInfo import RansacLineInfo
import numpy as np
from skimage.measure import LineModelND, ransac
from typing import List
from sklearn.neighbors import KDTree
import statistics
import simplegeometry as sg
from .StoppingCriteria import StoppingCriteria
import math
class RansacLineFinder(object):
    """Sequentially finds line from the given points using the RANSAC algorithm"""
    def __init__(self, pixels:np.ndarray,width:float,height:float,nnd_threshold_factor:float):
        # pixels is treated as an (N, 2) array of point coordinates —
        # TODO confirm the column order (x, y) against callers.
        self.__width=width
        self.__height=height
        self.__all_black_points=pixels
        self.__max_models_to_find=NaN
        self.__min_inliers_allowed=3 # A line is selected only if it has these many inliers
        self.__min_samples=3 #RANSAC parameter - The minimum number of data points to fit a model to
        self.__mean_nne_threshold_factor=nnd_threshold_factor #This will be multiplied by the mean nearest neighbour distance. 0.5, 0.25 are good values
        self.compute_max_ransac_trials()
        self.__stopping_criteria:StoppingCriteria=StoppingCriteria.MAX_OBJECTS
        self.__counter=0
        # Threshold history used by the RANSAC_THRESHOLD_SPIKE stopping rule.
        self.__first_ransac_threshold=NaN
        self.__current_ransac_threshold=NaN
        self.__ransac_threshold_spike_factor=NaN
    @property
    def stopping_criteria(self)->StoppingCriteria:
        """The stopping_criteria property."""
        return self.__stopping_criteria
    @stopping_criteria.setter
    def stopping_criteria(self, value)->None:
        self.__stopping_criteria = value
    @property
    def max_models(self):
        """The total no of Lines to find. This is used for the stopping criteria"""
        return self.__max_models_to_find
    @max_models.setter
    def max_models(self, value):
        self.__max_models_to_find = value
    @property
    def ransac_threshold_spike_factor(self):
        """Used as a stopping criteria. When the ratio of the current ransac threshold to the original ransac threshold exceeds this value, the search is stopped"""
        return self.__ransac_threshold_spike_factor
    @ransac_threshold_spike_factor.setter
    def ransac_threshold_spike_factor(self, value):
        self.__ransac_threshold_spike_factor = value
    def compute_max_ransac_trials(self):
        """Cap RANSAC trials at one per unordered pair of points."""
        count_of_pixels=len(self.__all_black_points)
        self.__MAX_RANSAC_TRIALS=int(count_of_pixels*(count_of_pixels-1)/2)
        pass
    '''
    Use the mean nearest neighbour distance to arrive at the RANSAC threshold
    The function returns a tuple (mean_nearest_neighbour_distance, ransac_threshold)
    '''
    def determine_ransac_threshold(self,points:np.ndarray)->'tuple[float, float]':
        tree = KDTree(points)
        nearest_dist, nearest_ind = tree.query(points, k=2) # k=2 nearest neighbors where k1 = identity
        mean_distances_current_iterations=list(nearest_dist[0:,1:].flatten())
        mean=statistics.mean(mean_distances_current_iterations)
        ransac_thresold= mean * self.__mean_nne_threshold_factor
        return (mean,ransac_thresold)
    def validateparams(self):
        """Raise if the configured stopping criteria is missing its parameter."""
        if (self.stopping_criteria == StoppingCriteria.RANSAC_THRESHOLD_SPIKE):
            if (math.isnan(self.ransac_threshold_spike_factor)):
                raise Exception("The property ransac threshold spike factor has not been set")
        elif (self.stopping_criteria == StoppingCriteria.MAX_OBJECTS):
            if (math.isnan(self.max_models)):
                raise Exception("The property max models has not been set")
        else:
            raise Exception(f"Invalid stopping criteria:{self.stopping_criteria}")
    def find(self)->List[RansacLineInfo]:
        """Iteratively extract RANSAC lines until a stopping rule fires."""
        self.validateparams()
        print(f"Starting RANSAC line determination using max trials={self.__MAX_RANSAC_TRIALS}, stopping criteria={self.stopping_criteria}")
        line_results:List[RansacLineInfo]=[]
        starting_points=self.__all_black_points
        all_inliers=[]
        ###
        while True:
            if (len(starting_points) <= self.__min_samples):
                print("No more points available. Terminating search for RANSAC")
                break
            # Threshold adapts each round to the density of the remaining points.
            (mean_nnd,self.__current_ransac_threshold)=self.determine_ransac_threshold(starting_points)
            inlier_points,inliers_removed_from_starting,model=self.__extract_first_ransac_line(starting_points,max_distance=self.__current_ransac_threshold)
            if (self.__counter==0):
                self.__first_ransac_threshold=self.__current_ransac_threshold
            if (len(inlier_points) < self.__min_inliers_allowed):
                print("Not sufficeint inliers found %d , threshold=%d, therefore halting" % (len(inlier_points),self.__min_inliers_allowed))
                break
            canstop=self.evaluate_ifcanstop()
            if (canstop):
                break
            all_inliers.extend(inlier_points)
            starting_points=inliers_removed_from_starting
            line_equation=self.create_linemodel_from_scikit_model(model)
            # Re-collect inliers from the full point set, not just the remainder.
            all_possible_inliers=self.find_inliers_from_all_points(line_equation=line_equation, threshold=self.__current_ransac_threshold)
            ransac_model=RansacLineInfo(all_possible_inliers,model)
            ransac_model.mean_nnd=mean_nnd
            ransac_model.ransac_threshold=self.__current_ransac_threshold
            line_results.append(ransac_model)
            print(f"\tFound RANSAC line with {len(ransac_model.inliers)} inliers, line number {self.__counter},mean nnnd={mean_nnd} ,nnd_threshold={self.__mean_nne_threshold_factor},ransac_threshold={self.__current_ransac_threshold},line info:{ransac_model}" )
            print(f"---------")
            self.__counter+=1
        return line_results
    def evaluate_ifcanstop(self):
        """True when the configured stopping rule says the search is done."""
        if (self.__stopping_criteria == StoppingCriteria.RANSAC_THRESHOLD_SPIKE):
            # NOTE(review): delta_threshold is computed but never used.
            delta_threshold=self.__current_ransac_threshold-self.__first_ransac_threshold
            ratio=(self.__current_ransac_threshold/self.__first_ransac_threshold)
            return ratio>self.__ransac_threshold_spike_factor
        elif (self.__stopping_criteria == StoppingCriteria.MAX_OBJECTS):
            return (self.__counter >= self.__max_models_to_find)
        else:
            raise Exception(f"Unsupported stopping criteria:{self.__stopping_criteria}")
    def find_inliers_from_all_points(self,line_equation:sg.LineModel, threshold:float)->np.ndarray:
        """All original points within `threshold` perpendicular distance of the line."""
        resulting_inlier_tuples=[]
        for black_pixel in self.__all_black_points:
            black_point=Point(black_pixel[0],black_pixel[1])
            perp_distance=line_equation.compute_distance(point=black_point)
            if (perp_distance > threshold):
                continue
            resulting_inlier_tuples.append((black_point.X,black_point.Y))
            pass
        arr=np.array(resulting_inlier_tuples)
        return arr
    def create_linemodel_from_scikit_model(self,scikitmodel)->sg.LineModel:
        """Convert scikit-image (origin, direction) line params to a simplegeometry line."""
        origin=sg.Point(scikitmodel.params[0][0],scikitmodel.params[0][1])
        unitvector=sg.Point(scikitmodel.params[1][0],scikitmodel.params[1][1])
        first_point=origin
        # Any second point along the direction vector defines the same line.
        second_point=sg.Point(first_point.X + unitvector.X*10, first_point.Y+unitvector.Y*10)
        lineequation=sg.LineModel.create_line_from_2points(first_point.X,first_point.Y, second_point.X, second_point.Y)
        return lineequation
    def __extract_first_ransac_line(self,data_points, max_distance:int):
        """
        Accepts a numpy array with shape N,2 N points, with coordinates x=[0],y=[1]
        Returns
            A numpy array with shape (N,2), these are the inliers of the just discovered ransac line
            All data points with the inliers removed
            The model line
        """
        model_robust, inliers = ransac(data_points, LineModelND, min_samples=self.__min_samples,residual_threshold=max_distance, max_trials=self.__MAX_RANSAC_TRIALS)
        results_inliers=[]
        results_inliers_removed=[]
        for i in range(0,len(data_points)):
            if (inliers[i] == False):
                #Not an inlier
                results_inliers_removed.append(data_points[i])
                continue
            x=data_points[i][0]
            y=data_points[i][1]
            results_inliers.append((x,y))
        return np.array(results_inliers), np.array(results_inliers_removed),model_robust
    @property
    def max_ransac_trials(self):
        """The max_ransac_trials property."""
        return self.__MAX_RANSAC_TRIALS
    @max_ransac_trials.setter
    def max_ransac_trials(self, value):
        self.__MAX_RANSAC_TRIALS = value
|
from .movie_library import spearman_corr
from .movie_library import sentiment_boxoffice_all
from .movie_library import sentiment
from .movie_library import tweet_collector
|
import base64
import hashlib
from Crypto.Cipher import AES
from django.conf import settings
class AESEncrypt:
    """AES/ECB helper using Java-SHA1PRNG-compatible key derivation.

    NOTE(review): ECB mode leaks plaintext patterns; acceptable only for
    short, low-sensitivity tokens — confirm the upstream contract.
    """

    def __init__(self, key: str = settings.KUNMING_PICC_CLUB_AES_KEY):
        # ECB is stateless, so one cipher object serves both directions.
        self.aes = AES.new(self.get_sha1prng_key(key), AES.MODE_ECB)

    @staticmethod
    def get_sha1prng_key(key: str) -> bytes:
        """Derive a 16-byte AES key: double SHA-1, truncated (Java SHA1PRNG style)."""
        signature: bytes = hashlib.sha1(key.encode()).digest()
        signature: bytes = hashlib.sha1(signature).digest()
        return signature[:16]

    @staticmethod
    def padding(s: str) -> str:
        """PKCS#5/7-pad `s` to a multiple of 16 characters."""
        pad_num: int = 16 - len(s) % 16
        return s + pad_num * chr(pad_num)

    @staticmethod
    def un_padding(s: str) -> str:
        """Strip the PKCS#5/7 padding added by `padding`."""
        padding_num: int = ord(s[-1])
        return s[:-padding_num]

    def encrypt(self, s: str) -> str:
        """Encrypt `s` and return the ciphertext as base64 text."""
        content_b = self.padding(s).encode("utf-8")
        encrypted = self.aes.encrypt(content_b)
        return base64.b64encode(encrypted).decode()

    def decrypt(self, s: str) -> str:
        """Decrypt base64 ciphertext produced by `encrypt`."""
        s = base64.b64decode(s)
        s_bytes = self.aes.decrypt(s)
        return self.un_padding(s_bytes.decode())

    @staticmethod
    def s_to_md5(s: str) -> str:
        """Return the uppercase hex MD5 digest of `s`."""
        return hashlib.md5(s.encode("utf-8")).hexdigest().upper()
|
from typing import Mapping, Union, Optional, Sequence
import numpy as np
from .operation import Operation
from .op_placeholder import OpPlaceholder
from .op_keepdims import OpKeepdims
class OpMin(OpKeepdims):
    """Calculate the minimum of elements.

    NOTE(review): when `keepdims` is False this op appears to act as a
    pass-through, presumably because the OpKeepdims base wraps an inner
    keepdims=True reduction plus a squeeze — confirm against OpKeepdims.
    """
    def __init__(self,
                 x: Operation,
                 axis: Optional[Union[int, Sequence[int]]] = None,
                 keepdims: bool = False,
                 **kwargs):
        # The class itself is handed to the base, which uses it to build the
        # keepdims=True variant when needed.
        super(OpMin, self).__init__(self.__class__, x, axis, keepdims, **kwargs)

    def _forward(self, feed_dict: Mapping[Union[str, OpPlaceholder], np.ndarray]) -> np.ndarray:
        if not self.params['keepdims']:
            # Pass-through: the inner op has already reduced — TODO confirm.
            return self.values[0]
        return np.min(self.values[0], axis=self.params['axis'], keepdims=True)

    def _backward(self, gradient: np.ndarray) -> None:
        if not self.params['keepdims']:
            self.gradients = [gradient]
            return
        # Route gradient only to positions that attained the minimum.
        self.gradients = [np.equal(self.output, self.values[0]).astype(np.float64) * gradient]
|
from __future__ import print_function
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.io import ascii
import astropy.units as u
c = 299792.458 * u.Unit('km/s')
import matplotlib as mpl
import seaborn as sns
sns.set_style("whitegrid", {'axes.grid' : False})
mpl.rcParams['font.family'] = 'stixgeneral'
mpl.rcParams['font.size'] = 12.
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as ticker
import os
import glob
import collections
os.sys.path.insert(0, '/Users/molly/Dropbox/misty/MISTY-pipeline/spectacle')
from spectacle.analysis.statistics import delta_v_90, equivalent_width
from spectacle.analysis import Resample
from spectacle.analysis.line_finding import LineFinder
from spectacle.core.spectrum import Spectrum1DModel
from scipy.signal import argrelextrema
def run_spectacle_on_kodiaq(**kwargs):
    """Re-fit KODIAQ absorber components with spectacle and tabulate results.

    Reconstructs each sightline's per-ion spectra from 'tab_fit_result.txt',
    runs spectacle's LineFinder on them, saves a diagnostic figure per
    sightline, and writes one summary table plus four per-ion component
    tables to disk.

    Keyword Args:
        plotting: read but otherwise unused in the current code path.
        threshold: minimum flux decrement treated as a line (default 0.02).
    """
    plotting = kwargs.get('plotting', False)
    threshold = kwargs.get('threshold', 0.02)
    # first, read in the dataset
    kodiaq_file = 'tab_fit_result.txt'
    kodiaq = ascii.read(kodiaq_file)
    # output table has Nmin, Ncomp_in, Ncomp_out, EW, dv90
    all_data = Table(names=('los', 'z', 'HI_col',
                    'Si_II_col','Si_II_Nmin','Si_II_Ncomp','Si_II_EW','Si_II_dv90',\
                    'Si_IV_col','Si_IV_Nmin','Si_IV_Ncomp','Si_IV_EW','Si_IV_dv90',\
                    'C_IV_col','C_IV_Nmin','C_IV_Ncomp','C_IV_EW','C_IV_dv90',\
                    'O_VI_col','O_VI_Nmin',"O_VI_Ncomp","O_VI_EW",'O_VI_dv90'), \
                    dtype=('S16', 'f8', 'f8', # HI
                    "f8","f8",'f8','f8','f8', # Si II
                    'f8','f8','f8','f8','f8', # Si IV
                    'f8','f8','f8','f8','f8', # C IV
                    'f8',"f8",'f8','f8',"f8")) # O VI
    # assume KODIAQ has SiII, SiIV, CIV, OVI per each
    # ADD THE DELTA_Vs TO THESE TABLES !!!!
    si2_component_data = Table(names=('los', 'z', 'tot_col', 'component', 'comp_col', 'comp_b', 'delta_v'), \
                               dtype=('S16', 'f8', 'f8', 'i8', 'f8', 'f8', 'f8'))
    si4_component_data = Table(names=('los', 'z', 'tot_col', 'component', 'comp_col', 'comp_b', 'delta_v'), \
                               dtype=('S16', 'f8', 'f8', 'i8', 'f8', 'f8', 'f8'))
    c4_component_data = Table(names=('los', 'z', 'tot_col', 'component', 'comp_col', 'comp_b', 'delta_v'), \
                              dtype=('S16', 'f8', 'f8', 'i8', 'f8', 'f8', 'f8'))
    o6_component_data = Table(names=('los', 'z', 'tot_col', 'component', 'comp_col', 'comp_b', 'delta_v'), \
                              dtype=('S16', 'f8', 'f8', 'i8', 'f8', 'f8', 'f8'))
    # Ordered so the figure panels appear in a fixed ion order.
    ion_dict = collections.OrderedDict()
    ion_dict['SiII'] = 'Si II 1260'
    ion_dict['CIV'] = 'C IV 1548'
    ion_dict['SiIV'] = 'Si IV 1394'
    ion_dict['OVI'] = 'O VI 1032'
    ion_table_name_dict = {'SiII' : si2_component_data, \
                           'SiIV' : si4_component_data, \
                           'CIV' : c4_component_data, \
                           'OVI' : o6_component_data}
    redshift = 0.0
    print('constructing with redshift = ',redshift,'!!!')
    # Common velocity grid (km/s) on which every spectrum is reconstructed.
    vmin = -600.
    vmax = 600.
    dv = 1.
    velocity = np.arange(vmin, vmax, dv) * u.Unit('km/s')
    # group by absorber
    kodiaq_los = kodiaq.group_by('z_abs')
    for this_los in kodiaq_los.groups:
        print('starting ',this_los['Name'][0])
        fig = plt.figure(dpi=300)
        fig.set_figheight(8)
        fig.set_figwidth(6)
        gs = gridspec.GridSpec(4, 1)
        # Summary row accumulates 5 columns per ion across the loop below.
        row = [this_los['Name'][0], this_los['z_abs'][0], this_los['logN_HI'][0]]
        these_ions = this_los.group_by(['Ion'])
        for i, ion in enumerate(ion_dict.keys()):
            ax_spec = fig.add_subplot(gs[i, 0])
            mask = these_ions.groups.keys['Ion'] == ion
            this_ion = these_ions.groups[mask]
            # for each ion in sightline, generate spectrum
            spectrum = Spectrum1DModel(redshift=redshift, ion_name=ion_dict[ion])
            if(len(this_ion) == 0):
                # Ion absent in this sightline: sentinel values in the summary.
                row = row + [-1, -1, -1, -1, -1]
            else:
                lambda_0 = spectrum.rest_wavelength
                with u.set_enabled_equivalencies(u.equivalencies.doppler_relativistic(lambda_0)):
                    wavelength_rest = velocity.to('Angstrom')
                for comp in range(len(this_ion)):
                    # NOTE(review): comp_row_start is rebound every component but
                    # only the last binding is used further below — confirm intent.
                    comp_row_start = [this_los['Name'][0], this_los['z_abs'][0]]
                    delta_v = this_ion['v_i'][comp] * u.Unit('km/s')
                    col_dens = this_ion['log_N_i'][comp]
                    v_dop = this_ion['b_i'][comp] * u.Unit('km/s')
                    print(col_dens, v_dop, delta_v)
                    spectrum.add_line(column_density=col_dens,
                                      v_doppler=v_dop,
                                      delta_v=delta_v)
                    # Plot each published component individually for comparison.
                    this_comp = Spectrum1DModel(redshift=redshift, ion_name=ion_dict[ion])
                    this_comp.add_line(column_density=col_dens,
                                       v_doppler=v_dop,
                                       delta_v=delta_v)
                    this_flux = this_comp.flux(velocity)
                    ax_spec.step(velocity, this_flux, color='#984ea3', alpha=0.5)
                # run spectacle and calculate non-parametric measures
                flux = spectrum.flux(velocity)
                default_values = dict(
                    bounds={
                        'column_density': (11, 18), # Global bounds in log,
                        'v_doppler': (2, 500.) # Global bounds in km/s
                    }
                )
                print('*~*~*~*~*~> setting up the LineFinder *~*~*~*~*~>')
                print('length of arrays:', len(velocity), len(velocity), len(flux))
                line_finder = LineFinder(ion_name = ion_dict[ion],
                                         redshift=redshift,
                                         data_type='flux',
                                         defaults=default_values,
                                         threshold=threshold, # flux decrement has to be > threshold; default 0.01
                                         min_distance=2. * u.Unit('km/s'), # The distance between minima, in dispersion units!
                                         max_iter=2000 # The number of fitter iterations; reduce to speed up fitting at the cost of possibly poorer fits
                                         )
                print('*~*~*~*~*~> running the fitter now *~*~*~*~*~>')
                spec_mod = line_finder(velocity, flux)
                ax_spec.plot(velocity, np.ones(len(velocity)),color='k',lw=1, ls=":")
                # ax_spec.step(velocity, flux, color='#984ea3')
                # ax_spec.step(velocity, spec_mod.flux(velocity), lw=1, ls="--", dashes=(5, 2), color='darkorange')
                ax_spec.text(-550, 0, ion_dict[ion], fontsize=10.)
                # Tick marks at the published component velocities.
                for k in range(len(this_ion)):
                    delta_v = this_ion['v_i'][k] * u.Unit('km/s')
                    ax_spec.plot([delta_v.value, delta_v.value], [1.05, 0.95], color='#984ea3')
                plt.xlim(vmin, vmax)
                plt.ylim(-0.05, 1.05)
                if i < 3:
                    ax_spec.xaxis.set_major_locator(ticker.NullLocator())
                if i == 0:
                    hi_text = 'HI column = '+str(this_los['logN_HI'][0])
                    ax_spec.text(-550, 0.9, hi_text, fontsize=10.)
                plt.subplots_adjust(wspace=None, hspace=None)
                # OK, now save this information as a row in the relevant table
                comp_table = spec_mod.stats(velocity)
                print(comp_table)
                # Total column: components summed in linear space, logged back.
                tot_col = np.log10(np.sum(np.power(10.0,this_ion['log_N_i'])))
                Nmin = np.size(np.where(flux[argrelextrema(flux, np.less)[0]] < (1-threshold)))
                tot_ew = equivalent_width(wavelength_rest, flux, continuum=1.0)
                tot_dv90 = delta_v_90(velocity, flux, continuum=1.0)
                print("col, EW, dv90 = ", tot_col, tot_ew, tot_dv90)
                for ic, comp in enumerate(comp_table):
                    comp_row = comp_row_start + [tot_col, int(ic), comp['col_dens'], comp['v_dop'].value, comp['delta_v'].value]
                    ion_table_name_dict[ion].add_row(comp_row)
                    delta_v = comp['delta_v']
                    ax_spec.plot([delta_v.value, delta_v.value], [1.05, 0.95], color='darkorange')
                    # Overlay each fitted component in dashed orange.
                    this_comp = Spectrum1DModel(redshift=redshift, ion_name=ion_dict[ion])
                    this_comp.add_line(column_density=comp['col_dens'],
                                       v_doppler=comp['v_dop'],
                                       delta_v=comp['delta_v'])
                    this_flux = this_comp.flux(velocity)
                    ax_spec.step(velocity, this_flux, color='darkorange', ls="--", dashes=(5,2), alpha=0.5)
                row = row + [tot_col, Nmin, len(comp_table), tot_ew, tot_dv90.value]
        all_data.add_row(row)
        fig.tight_layout()
        outname = 'kodiaq_' + this_los['Name'][0] + '_' + str(this_los['z_abs'][0]) + '.png'
        plt.savefig(outname)
        outname = 'kodiaq_' + this_los['Name'][0] + '_' + str(this_los['z_abs'][0]) + '.pdf'
        plt.savefig(outname)
        plt.close(fig)
    # and save that info to the all_data table and the individual measures tables
    ascii.write(all_data, 'kodiaq_spectacle_all.dat', format='fixed_width', overwrite=True)
    ascii.write(si2_component_data, 'kodiaq_spectacle_si2.dat', format='fixed_width', overwrite=True)
    ascii.write(si4_component_data, 'kodiaq_spectacle_si4.dat', format='fixed_width', overwrite=True)
    ascii.write(c4_component_data, 'kodiaq_spectacle_c4.dat', format='fixed_width', overwrite=True)
    ascii.write(o6_component_data, 'kodiaq_spectacle_o6.dat', format='fixed_width', overwrite=True)
    # not sure yet how to systematically compare the output fits to the inputs --- N,b vs v?
# not sure yet how to systematically compare the output fits to the inputs --- N,b vs v?
if __name__ == "__main__":
run_spectacle_on_kodiaq(plotting=False)
|
# SANYAM MITTAL
# CE 42
# 18001003110
import sys
input = sys.stdin.readline
def multi_input():
    """Read one whitespace-separated line of integers from stdin.

    Note: `input` is rebound at module level to sys.stdin.readline for
    faster reads, so this does not show a prompt.
    """
    return map(int, input().split())

def array_print(arr):
    """Print the elements of `arr` space-separated on a single line."""
    print(' '.join(map(str, arr)))
def shortest_path(parent, node, dist, graph, visited):
    """Recursively relax the distance to `node` via `parent` (edge weight `dist`).

    DFS-style relaxation: a node is (re)processed whenever it is unvisited or
    the path through `parent` improves its recorded distance.
    NOTE(review): reads/writes the module-level `distance` list while `visited`
    is passed as a parameter — inconsistent; deep graphs can also exceed
    Python's recursion limit.
    """
    if visited[node]==0 or distance[node]>distance[parent]+dist:
        distance[node] = distance[parent] + dist
        visited[node] = 1
        for curr_node in graph[node]:
            node1 = curr_node[0]  # neighbour vertex
            d = curr_node[1]      # edge weight
            shortest_path(node, node1, d, graph, visited)
# Interactive driver: read an undirected weighted graph (1-based vertices)
# from stdin, then compute and print distances for vertices 1..V.
print("Number of vertices")
V = int(input())
graph = {}
for i in range(1,V+1):
    graph[i] = []
print("Total number of edges")
edges = int(input())
print("Node 1 - Node 2 - distance")
visited = [0]*(V+1)   # index 0 is a sentinel "virtual source"
distance = [0]*(V+1)  # distance[0] stays 0 and seeds the relaxation
for i in range(edges):
    n1, n2, d = multi_input()
    # Undirected: store the edge in both adjacency lists.
    graph[n1].append([n2,d])
    graph[n2].append([n1,d])
# Relax from sentinel parent 0 (weight 0) for every still-unvisited vertex,
# so disconnected components are also assigned distances.
for i in range(1, V+1):
    if visited[i]==0:
        shortest_path(0, i, 0, graph, visited)
array_print(distance[1:V+1])
|
# y = ax + b  — simple linear regression of monthly sales on the month index.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

data = pd.read_csv("satislar.csv")

# Quick exploration. NOTE: in a plain script these expressions are evaluated
# and discarded — they only display output in a REPL/notebook.
data.sample(10)
data.shape
data.columns
data.describe()
data.info()
data.duplicated().sum()
data.isnull().sum()

aylar = data["Aylar"].values.reshape(-1,1)       # feature: month index
satislar = data["Satislar"].values.reshape(-1,1)  # target: sales

# Scaling. fit_transform refits the scaler on each call, so x and y are each
# standardized by their own statistics; two separate StandardScaler instances
# would make that intent clearer.
sc = StandardScaler()
x = sc.fit_transform(aylar)
y = sc.fit_transform(satislar)

# Train/test split (fixed seed for reproducibility).
x_train, x_test, y_train, y_test = train_test_split(x,y ,test_size=0.2 , random_state=42)

# Fit the linear model.
lr = LinearRegression()
lr.fit(x_train, y_train)

# Predictions on the held-out set.
tahmin = lr.predict(x_test)

# Visualize real vs predicted sales against the (scaled) month feature.
x_test_data = pd.DataFrame(data= x_test, index= range(x_test.shape[0]), columns = ["aylar"])
y_test_data = pd.DataFrame(data = y_test, index = range(y_test.shape[0]),columns = ["satis"])
tahmin_data = pd.DataFrame(data = tahmin, index = range(tahmin.shape[0]), columns = ["tahmin"])
x_test_data = x_test_data.sort_index()
y_test_data = y_test_data.sort_index()
tahmin_data = tahmin_data.sort_index()
plt.plot(x_test_data["aylar"], y_test_data["satis"],color = "r", label = "Real", marker = 'o')
plt.plot(x_test_data["aylar"],tahmin_data["tahmin"],color = "b",label = "Predict", marker='o')
plt.legend()
# BUG FIX: `plt.show` (no parentheses) only referenced the function and
# never displayed the figure; it must be called.
plt.show()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Move a sprite over a background with the arrow keys at a fixed speed,
# frame-rate independent via the clock's elapsed time.
import pygame
from pygame.locals import *
from sys import exit
from vector import Vec2d

background_image = '../image/sushiplate.jpg'
sprite_image = '../image/fugu.png'

pygame.init()
screen = pygame.display.set_mode((640, 480), 0, 32)
background = pygame.image.load(background_image).convert()
sprite = pygame.image.load(sprite_image)
clock = pygame.time.Clock()

sprite_pos = Vec2d(200, 150)
sprite_speed = 300  # pixels per second

while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            exit()
    pressed = pygame.key.get_pressed()
    # Build a unit-ish direction vector from the arrow keys.
    key_direction = Vec2d(0, 0)
    if pressed[K_LEFT]:
        key_direction.x = -1
    elif pressed[K_RIGHT]:
        key_direction.x = +1
    if pressed[K_UP]:
        key_direction.y = -1
    elif pressed[K_DOWN]:
        key_direction.y = +1
    # NOTE(review): the return value of normalized() is discarded here; if
    # Vec2d.normalized() is not in-place, diagonal movement is ~1.41x faster
    # than axis-aligned movement — confirm against the vector module.
    key_direction.normalized()
    screen.blit(background, (0, 0))
    screen.blit(sprite, sprite_pos)
    # Cap at 30 FPS; tick() returns elapsed milliseconds since last frame.
    seconds = clock.tick(30) / 1000
    sprite_pos += key_direction * sprite_speed * seconds
    pygame.display.update()
import turtle

# Trace an almost-complete circle: 359 steps of 1 unit forward + 1 degree left.
turtle.shape('turtle')
for _ in range(1, 360):
    turtle.forward(1)
    turtle.left(1)
# Keep the window open until the user presses Enter.
input()
|
def trojkat(rozmiar):
    """Print a left-aligned triangle of '*' with `rozmiar` rows.

    Row k (1-based) contains exactly k stars; prints nothing for rozmiar <= 0.
    """
    for wiersz in range(1, rozmiar + 1):
        print("*" * wiersz)
# Demo: triangles of sizes 2, 3 and 4.
trojkat(2)
trojkat(3)
trojkat(4)
def trojkatOdwrotny(rozmiaro):
    """Print an upside-down triangle: rows shrink from `rozmiaro` stars to 1."""
    szerokosc = rozmiaro
    while szerokosc > 0:
        print('*' * szerokosc)
        szerokosc -= 1
# Demo: inverted triangles of sizes 3, 4 and 5.
trojkatOdwrotny(3)
trojkatOdwrotny(4)
trojkatOdwrotny(5)
def trojkatPiramida(h):
    """Print the reversed enumeration of the countdown h..1.

    For h=3 this prints [(2, 1), (1, 2), (0, 3)].
    NOTE(review): despite its name this does not draw a pyramid yet — it only
    prints the (index, width) pairs a pyramid printer would use.
    (Removed the unused local `s = h - 1` from the original.)
    """
    pary = list(enumerate(range(h, 0, -1)))
    pary.reverse()
    print(pary)
# Demo: show the (index, width) pairs for height 5.
h=5
trojkatPiramida(h)
|
# coding=utf-8
import json
import subprocess
import sys
if __name__ == '__main__':
    # Remap a font's cmap from simplified to traditional Chinese codepoints
    # using a two-column (simplified\ttraditional) mapping file, then rebuild
    # the font with otfccbuild.
    if len(sys.argv) != 2:
        # BUG FIX: the original bare `exit` merely evaluated the name and
        # did NOT terminate; actually exit when no font file is given.
        sys.exit(1)
    fnt = sys.argv[1]
    # Dump the font (glyph names suppressed, hex cmap keys, no BOM) to JSON.
    obj = json.loads(subprocess.check_output(('otfccdump.exe', '-n', '0', '--hex-cmap', '--no-bom', fnt)))
    with open('STWCharacters.txt', encoding='utf-8') as f:
        for line in f:
            st = line.rstrip('\n').split('\t')
            if st[0] == st[1]:
                continue
            # NOTE(review): ':4X' space-pads to width 4 — confirm otfccdump's
            # hex cmap keys are not zero-padded (':04X') before relying on this.
            s = f'U+{ord(st[0]):4X}'
            t = f'U+{ord(st[1]):4X}'
            try:
                # Point the simplified codepoint at the traditional glyph.
                obj['cmap'][s] = obj['cmap'][t]
            except KeyError:  # narrowed from bare `except`
                print('no %s' % st[0])
    subprocess.run(['otfccbuild.exe', '-O3', '-o', '%s_TC.ttf' % fnt[0:fnt.rfind('.')]], input=json.dumps(obj), encoding='utf-8')
|
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Read directly as a grayscale image.
# FIX: raw string — '\p' and '\m' in the original were invalid escape
# sequences (DeprecationWarning, and fragile on other backslash paths).
img = cv2.imread(r'D:\pythonFile\mtest.jpg', 0)

# Add 2000 pixels of salt noise at random positions.
for i in range(2000):
    temp_x = np.random.randint(0, img.shape[0])
    temp_y = np.random.randint(0, img.shape[1])
    img[temp_x][temp_y] = 255

# Bilateral filter: 9 = neighbourhood diameter; the two 75s are the
# spatial-Gaussian sigma and the intensity-similarity sigma.
blur = cv2.bilateralFilter(img, 9, 75, 75)

# Side-by-side matplotlib view (grayscale colormap).
plt.subplot(1, 2, 1), plt.imshow(img, 'gray')
plt.subplot(1, 2, 2), plt.imshow(blur, 'gray')

# OpenCV windows.
# BUG FIX: both imshow calls used the same window name "mtest", so the
# filtered image overwrote the noisy one; give each its own window.
cv2.imshow("mtest", img)
cv2.imshow("mtest_blur", blur)
# Keep the windows open (~1100 s or until a key is pressed).
cv2.waitKey(1100000)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import json
import sys
import time
import os
import glob
import shutil
import datetime
import argparse
from OCC.Display.SimpleGui import init_display
from OCC.Core.gp import gp_Pnt, gp_Vec, gp_Dir
from OCC.Core.gp import gp_Ax1, gp_Ax2, gp_Ax3
from OCC.Core.gp import gp_Pnt2d
from OCC.Core.BRepPrimAPI import BRepPrimAPI_MakeBox
from OCC.Core.BRepFilletAPI import BRepFilletAPI_MakeChamfer, BRepFilletAPI_MakeFillet
from OCC.Core.BRepMesh import BRepMesh_IncrementalMesh
from OCC.Core.BRepMeshData import BRepMeshData_Face
from OCC.Core.ChFi3d import ChFi3d_Rational
from OCC.Core.TColgp import TColgp_Array1OfPnt2d
from OCC.Core.TopExp import TopExp_Explorer
from OCC.Core.TopAbs import TopAbs_EDGE
from OCC.Core.StlAPI import StlAPI_Reader, StlAPI_Writer
from OCC.Core.IGESControl import IGESControl_Reader, IGESControl_Writer
from OCC.Extend.DataExchange import read_step_file, write_step_file, write_stl_file
from OCCUtils.Construct import make_box
from OCCUtils.Construct import make_line, make_wire, make_edge
from src.base_occ import dispocc
def _write_stl_file_meshed(a_shape, filename, mode, linear_deflection,
                           angular_deflection, is_relative):
    """Mesh `a_shape` with BRepMesh_IncrementalMesh and export it as STL.

    Shared implementation for write_stl_file_mesh1/2, which previously were
    near-identical copies differing only in the mesher's isRelative flag.

    a_shape: the topods_shape to export (must not be null)
    filename: output path (overwritten with a warning if it exists)
    mode: "ascii" or "binary"
    linear_deflection: lower means a more accurate mesh
    angular_deflection: lower means a more accurate mesh
    is_relative: passed as BRepMesh_IncrementalMesh's isRelative argument
    raises AssertionError on bad input or a failed mesh, IOError if the
    file was not written.
    """
    if a_shape.IsNull():
        raise AssertionError("Shape is null.")
    if mode not in ["ascii", "binary"]:
        raise AssertionError("mode should be either ascii or binary")
    if os.path.isfile(filename):
        print("Warning: %s file already exists and will be replaced" % filename)
    # The shape must be explicitly meshed before STL export.
    mesh = BRepMesh_IncrementalMesh(
        a_shape, linear_deflection, is_relative, angular_deflection, True)
    mesh.SetParallelDefault(True)
    mesh.Perform()
    if not mesh.IsDone():
        raise AssertionError("Mesh is not done.")
    stl_exporter = StlAPI_Writer()
    stl_exporter.SetASCIIMode(mode == "ascii")
    stl_exporter.Write(a_shape, filename)
    if not os.path.isfile(filename):
        raise IOError("File not written to disk.")


def write_stl_file_mesh1(a_shape, filename, mode="ascii", linear_deflection=0.9, angular_deflection=0.5):
    """Export `a_shape` to STL, meshing with isRelative=True.

    Note: defaults are 0.9 / 0.5 (the original docstring incorrectly
    claimed 0.001 for linear_deflection).
    """
    _write_stl_file_meshed(a_shape, filename, mode,
                           linear_deflection, angular_deflection, True)


def write_stl_file_mesh2(a_shape, filename, mode="ascii", linear_deflection=0.9, angular_deflection=0.5):
    """Export `a_shape` to STL, meshing with isRelative=False.

    Note: defaults are 0.9 / 0.5 (the original docstring incorrectly
    claimed 0.001 for linear_deflection).
    """
    _write_stl_file_meshed(a_shape, filename, mode,
                           linear_deflection, angular_deflection, False)
if __name__ == '__main__':
    argvs = sys.argv
    parser = argparse.ArgumentParser()
    opt = parser.parse_args()
    print(opt, argvs)
    obj = dispocc()
    #
    # https://www.opencascade.com/doc/occt-7.4.0/overview/html/occt_user_guides__modeling_algos.html#occt_modalg_6
    # https://www.opencascade.com/doc/occt-7.5.0/overview/html/occt_user_guides__modeling_algos.html#occt_modalg_6
    #
    # Build a 200x200x200 box and fillet three of its edges using an
    # evolutive radius law: radius 10 along the edge, defined by two 2D
    # points at parameters -1000 and 1000.
    axs = gp_Ax3()
    box = make_box(200, 200, 200)
    chf = BRepFilletAPI_MakeChamfer(box)  # created but never built/used
    # chf.Build()
    fil = BRepFilletAPI_MakeFillet(box)
    fil.SetFilletShape(ChFi3d_Rational)
    par = TColgp_Array1OfPnt2d(1, 2)
    par.SetValue(1, gp_Pnt2d(-1000, 10))
    par.SetValue(2, gp_Pnt2d(1000, 10))
    # Fillet the first three edges found by the explorer.
    top = TopExp_Explorer(box, TopAbs_EDGE)
    fil.Add(par, top.Current())
    top.Next()
    fil.Add(par, top.Current())
    top.Next()
    fil.Add(par, top.Current())
    # Export STEP plus three STL variants (default mesher and the two
    # custom-meshing writers defined above), then display the result.
    write_step_file(box, obj.tmpdir + "box.stp")
    write_step_file(fil.Shape(), obj.tmpdir + "box_fillet.stp", "AP214IS")
    write_stl_file(fil.Shape(), obj.tmpdir + "box_fillet.stl")
    write_stl_file_mesh1(fil.Shape(), obj.tmpdir + "box_fillet_mesh1.stl",
                         linear_deflection=0.1E-1, angular_deflection=0.1E-1)
    write_stl_file_mesh2(fil.Shape(), obj.tmpdir + "box_fillet_mesh2.stl",
                         linear_deflection=0.1E-1, angular_deflection=0.1E-1)
    obj.display.DisplayShape(fil.Shape())
    obj.display.DisplayShape(axs.Location())
    obj.ShowOCC()
|
# Generated by Django 3.2.6 on 2021-08-21 10:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen ResidentialDetails.title to 500 chars."""

    dependencies = [
        ('residential', '0008_residentialdetails_title'),
    ]

    operations = [
        migrations.AlterField(
            model_name='residentialdetails',
            name='title',
            field=models.CharField(max_length=500),
        ),
    ]
|
'''
2gbhosting gozlanurlresolver plugin
Copyright (C) 2011 t0mm0, DragonWin
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from t0mm0.common.net import Net
from gozlanurlresolver.plugnplay.interfaces import gozlanurlresolver
from gozlanurlresolver.plugnplay.interfaces import PluginSettings
from gozlanurlresolver.plugnplay import Plugin
import re
import urllib2
from gozlanurlresolver import common
import os
class TwogbhostingResolver(Plugin, gozlanurlresolver, PluginSettings):
    """URL resolver for 2gb-hosting.com video pages (Python 2 codebase).

    Resolution is a two-step flow: fetch the page to obtain the hidden
    `sid` token, POST it back as a confirmation, then reassemble the direct
    stream URL from the packed jwplayer token list in the response.
    """
    implements = [gozlanurlresolver, PluginSettings]
    name = "2gbhosting"

    def __init__(self):
        # Priority is user-configurable; fall back to 100 when unset.
        p = self.get_setting('priority') or 100
        self.priority = int(p)
        self.net = Net()

    def get_media_url(self, host, media_id):
        """Resolve (host, media_id) to a direct stream URL, or False on failure."""
        web_url = self.get_url(host, media_id)
        data = {}
        try:
            html = self.net.http_GET(web_url).content
        except urllib2.URLError, e:
            common.addon.log_error('2gb-hosting: http error %d fetching %s' %
                                   (e.code, web_url))
            return False
        # The session id is required for the confirmation POST below.
        r = re.search('<input type="hidden" name="sid" value="(.+?)" />', html)
        if r:
            sid = r.group(1)
            common.addon.log_debug('eg-hosting: found sid' + sid)
        else:
            common.addon.log_error('2gb-hosting: Could not find sid')
            return False
        try:
            data = { 'sid' : sid,'submit' : 'Click Here To Continue', }
            html = self.net.http_POST(web_url, data).content
        except urllib2.URLError, e:
            common.addon.log_error('2gbhosting: got http error %d fetching %s' %
                                   (e.code, web_url))
            return False
        # Extract the URL fragments from the p.a.c.k.e.d jwplayer script's
        # pipe-separated token list and rebuild the stream URL.
        r = re.search('swf\|(.+?)\|mpl\|\d+\|(.+?)\|stretching\|autostart\|' +
                      'jpg\|exactfit\|provider\|write\|lighttpd\|.+?\|' +
                      'thumbs\|mediaspace\|(.+)\|(.+)\|(.+?)\|image\|files',
                      html)
        if r:
            stream_host, url_part4, url_part2, url_part1, ext = r.groups()
            stream_url = 'http://%s.2gb-hosting.com/files/%s/%s/2gb/%s.%s' % (
                stream_host, url_part1, url_part2, url_part4, ext)
            common.addon.log_debug('2gbhosting: streaming url ' + stream_url)
        else:
            common.addon.log_error('2gbhosting: stream_url not found')
            return False
        return stream_url

    def get_url(self, host, media_id):
        """Build the 2gb-hosting page URL for a media id."""
        return 'http://www.2gb-hosting.com/v/%s' % media_id

    def get_host_and_id(self, url):
        """Split a page URL into (host, media_id), or False if it doesn't match."""
        r = re.search('//(.+?)/v/([0-9a-zA-Z/]+)', url)
        if r:
            return r.groups()
        else:
            return False

    def valid_url(self, url, host):
        """Return truthy when `url` or `host` belongs to 2gb-hosting."""
        return (re.match('http://(www.)?2gb-hosting.com/v/' +
                         '[0-9A-Za-z]+/[0-9a-zA-Z]+.*', url) or
                '2gb-hosting' in host)
|
from python_imagesearch.imagesearch import imagesearch_loop
from python_imagesearch.imagesearch import imagesearch
from python_imagesearch.imagesearch import imagesearcharea
import pyautogui
import msvcrt as m
import pygetwindow as gw
import time
from time import sleep
import keyboard
# Tuning knobs / shared state for the warehouse-automation loop below.
timeInterval = 0.5  # pause between ribbon keystrokes (seconds)
innerLoop = False   # set True once a shipment was created in the current pass
counter = 0         # consecutive passes without progress (bail-out guard)
def waitForWindow(windowName):
    """Poll for a window titled `windowName` for up to ~10 seconds.

    Returns True as soon as a matching window exists, False on timeout.

    BUG FIX: the original assigned the absolute time.perf_counter() value to
    its loop counter, so `toc < 10` became false after the first failed
    attempt and the "wait" gave up almost immediately. The elapsed time is
    now measured against a start timestamp.
    """
    start = time.perf_counter()
    while time.perf_counter() - start < 10:
        try:
            # getWindowsWithTitle returns a list; [0] raises IndexError
            # when no window matches yet.
            gw.getWindowsWithTitle(windowName)[0]
            print('Opened after: ' + str(time.perf_counter() - start) + 's')
            return True
        except IndexError:  # narrowed from bare `except`
            print('Not open, waiting for ' + windowName + '...')
            sleep(1)
    # Grace pause kept from the original before giving up.
    sleep(1)
    return False
def lookAndPress(imageName):
    """Single-shot search for `imageName` on screen; click it if found,
    otherwise print a warning."""
    pos = imagesearch(imageName)
    if pos[0] == -1:
        print("Could not find: " + imageName)
        return
    pyautogui.click(pos[0], pos[1])
def lookImageCheck(imageName):
    """Return True if `imageName` is currently visible on screen.

    (Simplified from the verbose `if ...: return True else: return False`.)
    """
    pos = imagesearch(imageName)
    return pos[0] != -1
def waitAndPress(imageName):
    """Poll (every 0.2s) until `imageName` appears, then click it; prints a
    warning if the search ultimately reports failure."""
    pos = imagesearch_loop(imageName, 0.2)
    if pos[0] == -1:
        print("Could not find: " + imageName)
        return
    pyautogui.click(pos[0], pos[1])
def waitImageCheck(imageName):
    """Poll (every 0.2s) until `imageName` appears; True on success,
    False (with a warning) otherwise."""
    pos = imagesearch_loop(imageName, 0.2)
    if pos[0] == -1:
        print("Could not find: " + imageName)
        return False
    return True
def homeTab():
    """Open the ribbon's Home tab by sending Alt then H, with a pause
    before, between and after the keystrokes."""
    for key in ('alt', 'h'):
        sleep(timeInterval)
        pyautogui.press(key)
    sleep(timeInterval)
def releaseButton():
    """Release the current document (Ctrl+F9 — presumably a Dynamics NAV
    shortcut; confirm against the target application)."""
    pyautogui.hotkey('ctrl', 'f9')

def createWareShipButton():
    """Home tab -> 'W': create a warehouse shipment."""
    homeTab()
    pyautogui.press('w')

def wareShipLineButton():
    """Home tab -> '8': open the warehouse shipment lines."""
    homeTab()
    pyautogui.press('8')

def showWareDocButton():
    """Show the warehouse document (Shift+F7)."""
    pyautogui.hotkey('shift', 'f7')

def nextButton():
    """Advance to the next record (Ctrl+PageDown)."""
    pyautogui.hotkey('ctrl', 'pagedown')
def nextButtonCheck():
    """Return True when the greyed-out 'next' button image is on screen,
    i.e. there is no further record to advance to.

    (Simplified from the verbose `if ...: return True else: return False`.)
    """
    pos = imagesearch("./nextBlank.png", 1)
    return pos[0] != -1
def createPickButton():
    """Home tab -> 'K': create a pick."""
    homeTab()
    pyautogui.press('k')

def pickLinesButton():
    """Home tab -> 'S': open the pick lines."""
    homeTab()
    pyautogui.press('s')

def cardButton():
    """Home tab -> 'C': open the card view."""
    homeTab()
    pyautogui.press('c')

def quantityButton():
    """Home tab -> 'A' (presumably autofill quantity-to-handle — confirm
    against the target application)."""
    homeTab()
    pyautogui.press('a')

def registerButton():
    """Home tab -> 'R': register the pick."""
    homeTab()
    pyautogui.press('r')
if __name__ == "__main__":
    # End-to-end GUI automation: for each sales order, release it, create a
    # warehouse shipment, create/register the pick, then advance to the next
    # record until the "next" button is blank.
    winEditSalesOrder = gw.getWindowsWithTitle('Edit - Sales')[0]
    winEditSalesOrder.activate()
    nextbuttoncheck = False
    while not nextbuttoncheck:
        sleep(0.3)
        releaseButton()
        createWareShipButton()
        sleep(1)
        if lookImageCheck("./releaseError.png"):
            # Dismiss the release-error dialog; innerLoop stays False so the
            # no-progress counter below advances.
            pyautogui.press('enter')
        else:
            pyautogui.press('enter')
            innerLoop = True
        if not innerLoop:
            counter += 1
            if counter > 10:
                # Too many consecutive passes without progress — bail out.
                break
        wareShipLineButton()
        waitForWindow('Whse. Shipment')
        showWareDocButton()
        waitForWindow('Warehouse Shipment')
        if lookImageCheck("./completelyPicked.png"):
            # Already picked: back out and move to the next record.
            pyautogui.press('esc')
            sleep(0.5)
            pyautogui.press('esc')
            sleep(1)
            nextbuttoncheck = nextButtonCheck()
            if not nextbuttoncheck:
                nextButton()
        else:
            counter = 0
            innerLoop = False
            waitForWindow('Warehouse Shipment')
            print('about to create pick')
            createPickButton()
            sleep(1)
            pyautogui.hotkey('ctrl', 'enter')
            sleep(1)
            pyautogui.press('enter')
            pickLinesButton()
            waitForWindow('Warehouse Activity')
            cardButton()
            waitForWindow('Warehouse Pick')
            quantityButton()
            registerButton()
            sleep(1)
            if lookImageCheck("./yesButton.png"):
                # Confirm the register prompt.
                pyautogui.press('left')
                sleep(timeInterval)
                pyautogui.press('enter')
                sleep(timeInterval)
            elif lookImageCheck("./noBin.png"):
                # Manual intervention: wait for the operator to press a key;
                # 'a' aborts this record, anything other than 'd' stops.
                a = keyboard.read_key('d')
                if a == 'a':
                    # BUG FIX: the original called sleep(sleepTime) with an
                    # undefined name `sleepTime` (NameError when this branch
                    # ran); use the module-level timeInterval instead.
                    sleep(timeInterval)
                    pyautogui.press('esc')
                    sleep(timeInterval)
                    pyautogui.press('esc')
                elif a != 'd' and a != 'a':
                    break
            sleep(1)
            pyautogui.press('esc')
            sleep(0.5)
            pyautogui.press('esc')
            sleep(1)
            nextbuttoncheck = nextButtonCheck()
            if not nextbuttoncheck:
                nextButton()
from datetime import datetime
from typing import List, cast
from uuid import UUID
from eventsourcing.application import ProcessingEvent
from eventsourcing.examples.cargoshipping.application import BookingApplication
from eventsourcing.examples.cargoshipping.domainmodel import Cargo
from eventsourcing.examples.searchabletimestamps.persistence import (
SearchableTimestampsRecorder,
)
from eventsourcing.persistence import Recording
class SearchableTimestampsApplication(BookingApplication):
    """BookingApplication that additionally indexes Cargo events by timestamp,
    so aggregates can be retrieved as they were at a given point in time."""

    def _record(self, processing_event: ProcessingEvent) -> List[Recording]:
        """Pass (originator_id, timestamp, version) rows for every Cargo event
        to the recorder via saved_kwargs, then record as usual."""
        event_timestamps_data = [
            (e.originator_id, e.timestamp, e.originator_version)
            for e in processing_event.events
            if isinstance(e, Cargo.Event)
        ]
        # The SearchableTimestampsRecorder picks this up and persists the
        # timestamp index together with the events.
        processing_event.saved_kwargs["event_timestamps_data"] = event_timestamps_data
        return super()._record(processing_event)

    def get_cargo_at_timestamp(self, tracking_id: UUID, timestamp: datetime) -> Cargo:
        """Return the Cargo aggregate in the state it had at `timestamp`."""
        recorder = cast(SearchableTimestampsRecorder, self.recorder)
        # Look up which aggregate version was current at the timestamp, then
        # reconstruct the aggregate up to exactly that version.
        version = recorder.get_version_at_timestamp(tracking_id, timestamp)
        return cast(Cargo, self.repository.get(tracking_id, version=version))
|
from joblib import Parallel, delayed
from farm_energy.layout import read_layout
from power_models import power_v90 as power
from site_conditions.wind_conditions.windrose import read_windrose
from wake_models import jensen_1angle, ainslie_1angle, larsen_1angle, ainsliefull_1angle
def _windrose_profit(layout_file, windrose_file, wake_model, **wake_kwargs):
    """Shared driver for the *_windrose wrappers below.

    Reads the turbine layout and wind rose, evaluates `wake_model` for every
    wind-rose direction in parallel (joblib), converts the resulting
    per-turbine wind speeds to power with the V90 power curve, and returns
    the total farm power ("profit") per direction as a list.

    (The original four near-identical functions also computed per-direction
    efficiency statistics that were never used or returned; that dead code
    is dropped here.)
    """
    layout_x, layout_y = read_layout(layout_file)
    wind_direction, wind_speed, wind_frequency = read_windrose(windrose_file)
    U = Parallel(n_jobs=-1)(
        delayed(wake_model)(layout_x, layout_y, wind_speed[i],
                            wind_direction[i], **wake_kwargs)
        for i in range(len(wind_direction)))
    # Farm power per direction: sum of the per-turbine powers.
    P = [[power(u) for u in U[i]] for i in range(len(wind_direction))]
    return [sum(powers) for powers in P]


def jensen_windrose(layout_file, windrose_file):
    """Farm power per wind-rose direction using the Jensen wake model."""
    return _windrose_profit(layout_file, windrose_file, jensen_1angle,
                            rotor_radius=40.0, k=0.04)


def ainslie_windrose(layout_file, windrose_file):
    """Farm power per wind-rose direction using the Ainslie wake model."""
    return _windrose_profit(layout_file, windrose_file, ainslie_1angle,
                            rotor_radius=40.0, TI=0.08)


def ainsliefull_windrose(layout_file, windrose_file):
    """Farm power per wind-rose direction using the full Ainslie wake model."""
    return _windrose_profit(layout_file, windrose_file, ainsliefull_1angle,
                            rotor_radius=40.0, TI=0.08)


def larsen_windrose(layout_file, windrose_file):
    """Farm power per wind-rose direction using the Larsen wake model."""
    return _windrose_profit(layout_file, windrose_file, larsen_1angle,
                            rotor_radius=40.0, hub_height=100.0, TI=0.08)
# Script driver (Python 2 — note the statement-form prints). Each section
# times one wake model over a wind rose and dumps the per-direction farm
# power ("profit") to a text file, one value per line; the unused variants
# are kept commented out for quick switching.
if __name__ == '__main__':
    from time import time
    # start = time()
    # res = jensen_windrose('coordinates.dat', 'windrose2.dat')
    # print time() - start
    # with open('profit30_jensen_parallel.dat', 'w', 1) as angles:
    #     for i in range(len(res)):
    #         angles.write('{0}\n'.format(res[i]))
    start = time()
    res = ainslie_windrose('coordinates.dat', 'windrose.dat')
    print time() - start
    # Line-buffered output file (buffering=1), one profit value per line.
    with open('profit_ainslie_parallel_meander.dat', 'w', 1) as angles:
        for i in range(len(res)):
            angles.write('{0}\n'.format(res[i]))
    # start = time()
    # res = larsen_windrose('coordinates.dat', 'windrose2.dat')
    # print time() - start
    # with open('profit30_larsen_parallel.dat', 'w', 1) as angles:
    #     for i in range(len(res)):
    #         angles.write('{0}\n'.format(res[i]))
    # start = time()
    # res = ainsliefull_windrose('coordinates.dat', 'windrose2.dat')
    # print time() - start
    # with open('profit30_ainsfull_parallel.dat', 'a', 1) as angles:
    #     for i in range(len(res)):
    #         angles.write('{0}\n'.format(res[i]))
|
# -*- coding: utf-8 -*-
# © 2018 Hideki Yamamoto
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
from datetime import datetime
class ees_group_tagging_multi_action(models.TransientModel):
    """Wizard to add or remove tags (res.partner.category) on many partners
    at once, launched from the partner list view."""
    _name = 'ees_group_tagging.multi_tag_action'

    # Tags to apply/remove and the partners selected in the list view.
    categories = fields.Many2many('res.partner.category',string="Tags")
    partners = fields.Many2many('res.partner', string="Partners")

    # NOTE(review): @api.depends is intended for computed fields; on these
    # action methods it likely has no effect — confirm whether @api.multi
    # (or no decorator) was meant.
    @api.depends('categories','partners')
    def do_tag_all(self):
        """Link every selected category to every selected partner
        (m2m command 4 = link existing record)."""
        if self.partners:
            if self.categories:
                for c in self.categories:
                    for p in self.partners:
                        p.update({'category_id': [[4,c.id]]})

    @api.depends('categories','partners')
    def do_untag_all(self):
        """Unlink every selected category from every selected partner
        (m2m command 3 = unlink, keep record)."""
        if self.partners:
            if self.categories:
                for c in self.categories:
                    for p in self.partners:
                        p.update({'category_id': [[3,c.id]]})

    @api.multi
    def create_wizard(self):
        """Create the wizard pre-filled with the partners selected in the
        current context and open it as a modal dialog."""
        wizard_id = self.create({})
        wizard_id.partners = self.env.context.get('active_ids', []) or []
        return {
            'name': 'Multi Tag Action',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'ees_group_tagging.multi_tag_action',
            'res_id': wizard_id.id,
            'type': 'ir.actions.act_window',
            'target': 'new',
            'context': self.env.context
        }
#!/usr/bin/env python
from kafka import KafkaProducer
from flask import Flask, request
from flask import json
app = Flask(__name__)
# Connects at import time; 'kafka:29092' is presumably the docker-compose
# service address — confirm against the deployment config.
producer = KafkaProducer(bootstrap_servers='kafka:29092')
def log_to_kafka(topic, event):
    """Enrich `event` with request metadata and publish it to Kafka.

    Adds all incoming HTTP headers plus the client address to the event
    dict (mutating the caller's dict), then sends it JSON-encoded as
    bytes to `topic`.
    """
    event.update(request.headers)
    event.update({'remote_addr': request.remote_addr})
    producer.send(topic, json.dumps(event).encode())
@app.route("/")
def default_response():
    """Log a 'default' event to Kafka for any GET on / and acknowledge."""
    default_event = {'event_type': 'default'}
    log_to_kafka('userItems', default_event)
    return "This is the default response!\n"
@app.route("/purchase_sword", methods = ['GET','POST'])
def purchase_sword():
    """Log a sword-purchase event to Kafka.

    GET logs a bare event; POST with a JSON body embeds the body in the
    event. BUG FIX: a non-JSON POST previously fell through returning None
    (which Flask turns into a 500), and a missing Content-Type header raised
    KeyError — it now gets an explicit 415 and uses headers.get().
    """
    if request.method == 'GET':
        sword_event = {'event_type': 'purchase_a_sword'}
        log_to_kafka('userItems', sword_event)
        return "Sword Purchased\n"
    if request.headers.get('Content-Type') == 'application/json':
        sword_event = {'event_type': 'purchase_a_sword:' + ' '+ json.dumps(request.json)}
        log_to_kafka('userItems', sword_event)
        return "Sword Purchased: " + json.dumps(request.json) + "\n"
    return "Content-Type must be application/json\n", 415
@app.route("/join_a_guild", methods = ['GET','POST'])
def join_guild():
    """Log a join-guild event to Kafka.

    GET logs a bare event; POST with a JSON body embeds the body in the
    event. BUG FIX: a non-JSON POST previously fell through returning None
    (HTTP 500) and a missing Content-Type header raised KeyError — it now
    gets an explicit 415 and uses headers.get().
    """
    if request.method == 'GET':
        join_guild_event = {'event_type': 'join_guild'}
        log_to_kafka('userItems', join_guild_event)
        return "Join a Guild!\n"
    if request.headers.get('Content-Type') == 'application/json':
        join_guild_event = {'event_type': 'join_guild'+ ' '+ json.dumps(request.json)}
        log_to_kafka('userItems', join_guild_event)
        return "Join a guild!" + json.dumps(request.json) + "\n"
    return "Content-Type must be application/json\n", 415
@app.route("/get_coins", methods = ['GET','POST'])
def get_coins():
    """Log a get-coins event to Kafka.

    GET logs a bare event; POST with a JSON body embeds the body in the
    event. BUG FIX: a non-JSON POST previously fell through returning None
    (HTTP 500) and a missing Content-Type header raised KeyError — it now
    gets an explicit 415 and uses headers.get().
    """
    if request.method == 'GET':
        get_coins_event = {'event_type': 'get_coins'}
        log_to_kafka('userItems', get_coins_event)
        return "Get Coins\n"
    if request.headers.get('Content-Type') == 'application/json':
        get_coins_event = {'event_type': 'get_coins' + ' '+ json.dumps(request.json)}
        log_to_kafka('userItems', get_coins_event)
        # NOTE(review): [9:-1] slices the number out of '{"coins": N}' —
        # fragile; assumes exactly that key and formatting.
        return 'Get ' + json.dumps(request.json)[9:-1] + ' coins\n'
    return "Content-Type must be application/json\n", 415
|
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import torch.nn.functional as F
import pdb
def Entropy(input_):
    """Per-sample Shannon entropy of a batch of probability rows.

    input_: (B, C) tensor of (softmax) probabilities.
    Returns a (B,) tensor: -sum_c p_c * log(p_c + 1e-7); the 1e-7 guards
    against log(0).
    (Removed the unused local `bs = input_.size(0)` from the original.)
    """
    return torch.sum(-input_ * torch.log(input_ + 1e-7), dim=1)
def grl_hook(coeff):
    """Gradient-reversal-layer hook factory.

    Returns a backward hook that scales the incoming gradient by -coeff,
    implementing the GRL used for adversarial training.
    """
    def _reverse(grad):
        # clone() so the caller's gradient buffer is not modified in place.
        return -coeff * grad.clone()
    return _reverse
def DANN(features, ad_net, entropy=None, coeff=None, cls_weight=None, len_share=0):
    """Entropy-conditioned domain-adversarial (DANN) loss.

    features: stacked rows fed to the discriminator; rows [0:train_bs] are
        source, [train_bs:2*train_bs] target, and the trailing `len_share`
        rows extra samples labelled as target in `dc_target`.
    ad_net: domain discriminator returning a per-row probability.
    entropy: optional per-sample entropy; a GRL hook with `coeff` is
        registered on it and confident samples get weight 1 + exp(-H).
    cls_weight: per-sample class weights multiplied into both masks.
    len_share: number of trailing shared/extra rows.
    Returns the weighted BCE between discriminator output and the domain
    labels, normalized by the total weight. Requires CUDA (.cuda() calls).
    """
    ad_out = ad_net(features)
    train_bs = (ad_out.size(0) - len_share) // 2
    # Domain labels: 1 for the source rows, 0 for target + shared rows.
    dc_target = torch.from_numpy(np.array([[1]] * train_bs + [[0]] * (train_bs + len_share))).float().cuda()
    if entropy is not None:
        # Reverse gradients flowing through the entropy path (GRL) and
        # up-weight confident samples: w = 1 + exp(-H).
        entropy.register_hook(grl_hook(coeff))
        entropy = 1.0 + torch.exp(-entropy)
    else:
        entropy = torch.ones(ad_out.size(0)).cuda()
    # Source mask keeps source rows AND the trailing shared rows (only the
    # target slice is zeroed).
    source_mask = torch.ones_like(entropy)
    source_mask[train_bs : 2 * train_bs] = 0
    source_weight = entropy * source_mask
    source_weight = source_weight * cls_weight
    # Target mask keeps only the target slice.
    target_mask = torch.ones_like(entropy)
    target_mask[0 : train_bs] = 0
    target_mask[2 * train_bs::] = 0
    target_weight = entropy * target_mask
    target_weight = target_weight * cls_weight
    # Normalize each side to unit mass; the source side is scaled up by
    # (1 + len_share/train_bs) to balance the extra shared rows it contains.
    weight = (1.0 + len_share / train_bs) * source_weight / (torch.sum(source_weight).detach().item()) + \
             target_weight / torch.sum(target_weight).detach().item()
    weight = weight.view(-1, 1)
    return torch.sum(weight * nn.BCELoss(reduction='none')(ad_out, dc_target)) / (1e-8 + torch.sum(weight).detach().item())
def marginloss(yHat, y, classes=65, alpha=1, weight=None):
    """Complement-entropy style margin loss with confidence-based weighting.

    yHat: (B, classes) raw logits (softmaxed internally).
    y: (B,) int64 ground-truth labels.
    classes: number of classes (normalizes by log(classes - 1)).
    alpha: exponent applied to (1 - p_gt) when building sample weights.
    weight: optional per-sample weights, multiplied in place by the
        confidence term. Requires CUDA (.cuda() call below).
    """
    batch_size = len(y)
    classes = classes  # no-op kept from the original
    yHat = F.softmax(yHat, dim=1)
    # Probability assigned to the ground-truth class, per sample.
    Yg = torch.gather(yHat, 1, torch.unsqueeze(y, 1))#.detach()
    Yg_ = (1 - Yg) + 1e-7  # avoiding numerical issues (first)
    # Distribution renormalized over the complement (non-GT) classes.
    Px = yHat / Yg_.view(len(yHat), 1)
    Px_log = torch.log(Px + 1e-10)  # avoiding numerical issues (second)
    # Complement mask: 0 at the ground-truth class, 1 elsewhere.
    y_zerohot = torch.ones(batch_size, classes).scatter_(
        1, y.view(batch_size, 1).data.cpu(), 0)
    output = Px * Px_log * y_zerohot.cuda()
    # Normalized (negative) complement entropy per sample.
    loss = torch.sum(output, dim=1)/ np.log(classes - 1)
    Yg_ = Yg_ ** alpha
    if weight is not None:
        weight *= (Yg_.view(len(yHat), )/ Yg_.sum())
    else:
        weight = (Yg_.view(len(yHat), )/ Yg_.sum())
    weight = weight.detach()
    # Weighted mean over the batch.
    loss = torch.sum(weight * loss) / torch.sum(weight)
    return loss
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.